Commit 1ddee6d8 authored by David S. Miller

Merge branch 'gianfar-some-assorted-cleanup'

Arseny Solokha says:

====================
gianfar: some assorted cleanup

This is a cleanup series for the gianfar Ethernet driver, following up on a
discussion in [1]. It is intended to precede a conversion of gianfar from the
PHYLIB to the PHYLINK API, which will be submitted later as v2. However, it
won't make that conversion cleaner, except for the last patch in this series.
Obviously this series is not intended for -stable.

The first patch looks super controversial to me, as it moves lots of code
around for the sole purpose of getting rid of static forward declarations
in two translation units. On the other hand, this change is purely
mechanical and cannot do any harm other than cluttering git blame output.
I can prepare an alternative patch that only swaps adjacent functions
around, if necessary.

The second patch is a trivial follow-up to the first one, making functions
that are only called from the same translation unit static.
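For illustration, one such hunk from this very series (it shows up again in
the diff below for gfar_configure_coalescing_all()) looks like:

    -void gfar_configure_coalescing_all(struct gfar_private *priv)
    +static void gfar_configure_coalescing_all(struct gfar_private *priv)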

The third patch removes some now-unused macro and structure definitions
from gianfar.h that slipped through various cleanups in the past.

The fourth patch, also suggested in [1], makes the driver consistently use
the PHY connection type value obtained from a Device Tree node, instead of
ignoring it and using the one auto-detected by the MAC, when connecting to
the PHY. Obviously the value has to be specified correctly in the DT source,
or omitted altogether, in which case the driver will fall back to
auto-detection. When querying a DT node, the driver will also take both
applicable properties into account by making a proper API call instead of
open-coding the lookup in a half-correct way.
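A minimal sketch of what this means in practice (the node name and unit
address below are made up for illustration): a DT node carries

    ethernet@24000 {
            phy-connection-type = "rgmii-id";
    };

and gfar_of_init() now resolves it via of_get_phy_mode(), which checks the
"phy-mode" property first, then "phy-connection-type", and returns a
negative errno when neither is present, triggering the auto-detection
fallback (mirroring the gfar_of_init() hunk below):

    err = of_get_phy_mode(np);
    if (err >= 0)
            priv->interface = err;
    else
            priv->interface = gfar_get_interface(dev); /* auto-detect */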

[1] https://lore.kernel.org/netdev/CA+h21hruqt6nGG5ksDSwrGH_w5GtGF4fjAMCWJne7QJrjusERQ@mail.gmail.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d1e8496e 8e578e73
@@ -105,43 +105,6 @@
const char gfar_driver_version[] = "2.0";
static int gfar_enet_open(struct net_device *dev);
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -162,138 +125,6 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
bdp->lstatus = cpu_to_be32(lstatus);
}
static void gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
u32 __iomem *rfbptr;
int i, j;
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
/* Initialize some variables in our dev structure */
tx_queue->num_txbdfree = tx_queue->tx_ring_size;
tx_queue->dirty_tx = tx_queue->tx_bd_base;
tx_queue->cur_tx = tx_queue->tx_bd_base;
tx_queue->skb_curtx = 0;
tx_queue->skb_dirtytx = 0;
/* Initialize Transmit Descriptor Ring */
txbdp = tx_queue->tx_bd_base;
for (j = 0; j < tx_queue->tx_ring_size; j++) {
txbdp->lstatus = 0;
txbdp->bufPtr = 0;
txbdp++;
}
/* Set the last descriptor in the ring to indicate wrap */
txbdp--;
txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
TXBD_WRAP);
}
rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->next_to_clean = 0;
rx_queue->next_to_use = 0;
rx_queue->next_to_alloc = 0;
/* make sure next_to_clean != next_to_use after this
* by leaving at least 1 unused descriptor
*/
gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
rx_queue->rfbptr = rfbptr;
rfbptr += 2;
}
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
void *vaddr;
dma_addr_t addr;
int i, j;
struct gfar_private *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
priv->total_tx_ring_size = 0;
for (i = 0; i < priv->num_tx_queues; i++)
priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
priv->total_rx_ring_size = 0;
for (i = 0; i < priv->num_rx_queues; i++)
priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
/* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev,
(priv->total_tx_ring_size *
sizeof(struct txbd8)) +
(priv->total_rx_ring_size *
sizeof(struct rxbd8)),
&addr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_bd_base = vaddr;
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
}
/* Start the rx descriptor ring where the tx ring leaves off */
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->ndev = ndev;
rx_queue->dev = dev;
addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_skbuff =
kmalloc_array(tx_queue->tx_ring_size,
sizeof(*tx_queue->tx_skbuff),
GFP_KERNEL);
if (!tx_queue->tx_skbuff)
goto cleanup;
for (j = 0; j < tx_queue->tx_ring_size; j++)
tx_queue->tx_skbuff[j] = NULL;
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
sizeof(*rx_queue->rx_buff),
GFP_KERNEL);
if (!rx_queue->rx_buff)
goto cleanup;
}
gfar_init_bds(ndev);
return 0;
cleanup:
free_skb_resources(priv);
return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -444,7 +275,7 @@ static void gfar_configure_coalescing(struct gfar_private *priv,
}
}
void gfar_configure_coalescing_all(struct gfar_private *priv)
static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
@@ -477,6 +308,62 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
return &dev->stats;
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
* 1) Take the Destination Address (ie the multicast address), and
* do a CRC on it (little endian), and reverse the bits of the
* result.
* 2) Use the 8 most significant bits as a hash into a 256-entry
* table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits in the
 * hash index indicate which gaddr register to use, and the 5 other bits
* indicate which bit (assuming an IBM numbering scheme, which
* for PowerPC (tm) is usually the case) in the register holds
* the entry.
*/
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
u32 result = ether_crc(ETH_ALEN, addr);
int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
u32 value = (1 << (31-whichbit));
tempval = gfar_read(priv->hash_regs[whichreg]);
tempval |= value;
gfar_write(priv->hash_regs[whichreg], tempval);
}
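/* Worked example (illustrative, not part of the original source): with
 * hash_width == 8 and a CRC result whose top byte is 0xa1 (0b10100001),
 * the top 3 bits give whichreg = 5 and the next 5 bits give whichbit = 1,
 * so the code above sets 1 << (31 - 1), i.e. bit 30, of hash_regs[5].
 */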
/* There are multiple MAC Address register pairs on some controllers
* This function sets the numth pair to a given address
*/
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
macptr += num*2;
/* For a station address of 0x12345678ABCD in transmission
* order (BE), MACnADDR1 is set to 0xCDAB7856 and
* MACnADDR2 is set to 0x34120000.
*/
tempval = (addr[5] << 24) | (addr[4] << 16) |
(addr[3] << 8) | addr[2];
gfar_write(macptr, tempval);
tempval = (addr[1] << 24) | (addr[0] << 16);
gfar_write(macptr+1, tempval);
}
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
eth_mac_addr(dev, p);
@@ -486,24 +373,6 @@ static int gfar_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
.ndo_stop = gfar_close,
.ndo_change_mtu = gfar_change_mtu,
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
};
static void gfar_ints_disable(struct gfar_private *priv)
{
int i;
@@ -723,10 +592,53 @@ static int gfar_of_group_count(struct device_node *np)
return num;
}
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 ecntrl;
ecntrl = gfar_read(&regs->ecntrl);
if (ecntrl & ECNTRL_SGMII_MODE)
return PHY_INTERFACE_MODE_SGMII;
if (ecntrl & ECNTRL_TBI_MODE) {
if (ecntrl & ECNTRL_REDUCED_MODE)
return PHY_INTERFACE_MODE_RTBI;
else
return PHY_INTERFACE_MODE_TBI;
}
if (ecntrl & ECNTRL_REDUCED_MODE) {
if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
return PHY_INTERFACE_MODE_RMII;
}
else {
phy_interface_t interface = priv->interface;
/* This isn't autodetected right now, so it must
* be set by the device tree or platform code.
*/
if (interface == PHY_INTERFACE_MODE_RGMII_ID)
return PHY_INTERFACE_MODE_RGMII_ID;
return PHY_INTERFACE_MODE_RGMII;
}
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
return PHY_INTERFACE_MODE_GMII;
return PHY_INTERFACE_MODE_MII;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
const char *ctype;
const void *mac_addr;
int err = 0, i;
struct net_device *dev = NULL;
@@ -889,13 +801,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_TIMER |
FSL_GIANFAR_DEV_HAS_RX_FILER;
err = of_property_read_string(np, "phy-connection-type", &ctype);
/* We only care about rgmii-id. The rest are autodetected */
if (err == 0 && !strcmp(ctype, "rgmii-id"))
priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
/* Use PHY connection type from the DT node if one is specified there.
* rgmii-id really needs to be specified. Other types can be
* detected by hardware
*/
err = of_get_phy_mode(np);
if (err >= 0)
priv->interface = err;
else
priv->interface = PHY_INTERFACE_MODE_MII;
priv->interface = gfar_get_interface(dev);
if (of_find_property(np, "fsl,magic-packet", NULL))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
@@ -931,85 +845,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return err;
}
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
case HWTSTAMP_TX_ON:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
priv->hwts_tx_en = 1;
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
reset_gfar(netdev);
}
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
if (!priv->hwts_rx_en) {
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
config.flags = 0;
config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
config.rx_filter = (priv->hwts_rx_en ?
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return gfar_hwtstamp_set(dev, rq);
if (cmd == SIOCGHWTSTAMP)
return gfar_hwtstamp_get(dev, rq);
if (!phydev)
return -ENODEV;
return phy_mii_ioctl(phydev, rq, cmd);
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
u32 class)
{
@@ -1133,135 +968,6 @@ static void gfar_detect_errata(struct gfar_private *priv)
priv->errata);
}
void gfar_mac_reset(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
/* Reset MAC layer */
gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
/* We need to delay at least 3 TX clocks */
udelay(3);
/* the soft reset bit is not self-resetting, so we need to
* clear it before resuming normal operation
*/
gfar_write(&regs->maccfg1, 0);
udelay(3);
gfar_rx_offload_en(priv);
/* Initialize the max receive frame/buffer lengths */
gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
/* Initialize the Minimum Frame Length Register */
gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
* are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
* and by checking RxBD[LG] and discarding larger than MAXFRM.
*/
if (gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
gfar_write(&regs->maccfg2, tempval);
/* Clear mac addr hash registers */
gfar_write(&regs->igaddr0, 0);
gfar_write(&regs->igaddr1, 0);
gfar_write(&regs->igaddr2, 0);
gfar_write(&regs->igaddr3, 0);
gfar_write(&regs->igaddr4, 0);
gfar_write(&regs->igaddr5, 0);
gfar_write(&regs->igaddr6, 0);
gfar_write(&regs->igaddr7, 0);
gfar_write(&regs->gaddr0, 0);
gfar_write(&regs->gaddr1, 0);
gfar_write(&regs->gaddr2, 0);
gfar_write(&regs->gaddr3, 0);
gfar_write(&regs->gaddr4, 0);
gfar_write(&regs->gaddr5, 0);
gfar_write(&regs->gaddr6, 0);
gfar_write(&regs->gaddr7, 0);
if (priv->extended_hash)
gfar_clear_exact_match(priv->ndev);
gfar_mac_rx_config(priv);
gfar_mac_tx_config(priv);
gfar_set_mac_address(priv->ndev);
gfar_set_multi(priv->ndev);
/* clear ievent and imask before configuring coalescing */
gfar_ints_disable(priv);
/* Configure the coalescing support */
gfar_configure_coalescing_all(priv);
}
static void gfar_hw_init(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 attrs;
/* Stop the DMA engine now, in case it was running before
* (The firmware could have used it, and left it running).
*/
gfar_halt(priv);
gfar_mac_reset(priv);
/* Zero out the rmon mib registers if it has them */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
/* Mask off the CAM interrupts */
gfar_write(&regs->rmon.cam1, 0xffffffff);
gfar_write(&regs->rmon.cam2, 0xffffffff);
}
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
/* Set the extraction length and index */
attrs = ATTRELI_EL(priv->rx_stash_size) |
ATTRELI_EI(priv->rx_stash_index);
gfar_write(&regs->attreli, attrs);
/* Start with defaults, and add stashing
* depending on driver parameters
*/
attrs = ATTR_INIT_SETTINGS;
if (priv->bd_stash_en)
attrs |= ATTR_BDSTASH;
if (priv->rx_stash_size != 0)
attrs |= ATTR_BUFSTASH;
gfar_write(&regs->attr, attrs);
/* FIFO configs */
gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
/* Program the interrupt steering regs, only for MG devices */
if (priv->num_grps > 1)
gfar_write_isrg(priv);
}
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -1302,285 +1008,202 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv)
}
}
/* Set up the ethernet device structure, private data,
* and anything else we need before we start
*/
static int gfar_probe(struct platform_device *ofdev)
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
int err = 0, i;
u32 res;
err = gfar_of_init(ofdev, &dev);
/* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
return 0;
if (err)
return err;
/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
* the same as bits 23-30, the eTSEC Rx is assumed to be idle
* and the Rx can be safely reset.
*/
res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
res &= 0x7f807f80;
if ((res & 0xffff) == (res >> 16))
return 1;
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
return 0;
}
INIT_WORK(&priv->reset_task, gfar_reset_task);
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
unsigned int timeout;
int stopped;
platform_set_drvdata(ofdev, priv);
gfar_ints_disable(priv);
gfar_detect_errata(priv);
if (gfar_is_dma_stopped(priv))
return;
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
/* MTU range: 50 - 9586 */
dev->mtu = 1500;
dev->min_mtu = 50;
dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
dev->netdev_ops = &gfar_netdev_ops;
dev->ethtool_ops = &gfar_ethtool_ops;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
if (priv->poll_mode == GFAR_SQ_POLLING) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx_sq, 2);
} else {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
gfar_poll_rx, GFAR_DEV_WEIGHT);
netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx, 2);
}
retry:
timeout = 1000;
while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
cpu_relax();
timeout--;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
if (!timeout)
stopped = gfar_is_dma_stopped(priv);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
!__gfar_is_rx_idle(priv))
goto retry;
}
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
gfar_init_addr_hash_table(priv);
/* Disable the Rx/Tx hw queues */
gfar_write(&regs->rqueue, 0);
gfar_write(&regs->tqueue, 0);
/* Insert receive time stamps into padding alignment bytes, plus
 * 2 bytes of padding to ensure cpu alignment.
 */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
priv->padding = 8 + DEFAULT_PADDING;
mdelay(10);
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
gfar_halt_nodisable(priv);
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
priv->tx_queue[i]->txic = DEFAULT_TXIC;
}
/* Disable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
}
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
struct txbd8 *txbdp;
struct gfar_private *priv = netdev_priv(tx_queue->dev);
int i, j;
/* Always enable rx filer if available */
priv->rx_filer_enable =
(priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
/* use priority h/w tx queue scheduling for single queue devices */
if (priv->num_tx_queues == 1)
priv->prio_sched_en = 1;
txbdp = tx_queue->tx_bd_base;
set_bit(GFAR_DOWN, &priv->state);
for (i = 0; i < tx_queue->tx_ring_size; i++) {
if (!tx_queue->tx_skbuff[i])
continue;
gfar_hw_init(priv);
dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length),
DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
tx_queue->tx_skbuff[i] = NULL;
}
kfree(tx_queue->tx_skbuff);
tx_queue->tx_skbuff = NULL;
}
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
int i;
err = register_netdev(dev);
struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
if (err) {
pr_err("%s: Cannot register net device, aborting\n", dev->name);
goto register_fail;
}
dev_kfree_skb(rx_queue->skb);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
priv->wol_supported |= GFAR_WOL_MAGIC;
for (i = 0; i < rx_queue->rx_ring_size; i++) {
struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
priv->rx_filer_enable)
priv->wol_supported |= GFAR_WOL_FILER_UCAST;
rxbdp->lstatus = 0;
rxbdp->bufPtr = 0;
rxbdp++;
device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
if (!rxb->page)
continue;
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
struct gfar_priv_grp *grp = &priv->gfargrp[i];
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_tx");
sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_rx");
sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_er");
} else
strcpy(gfar_irq(grp, TX)->name, dev->name);
dma_unmap_page(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;
}
/* Initialize the filer table */
gfar_init_filer_table(priv);
kfree(rx_queue->rx_buff);
rx_queue->rx_buff = NULL;
}
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
/* If there are any tx skbs or rx skbs still around, free them.
* Then free tx_skbuff and rx_skbuff
*/
static void free_skb_resources(struct gfar_private *priv)
{
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
/* Even more device info helps when determining which kernel
* provided which set of benchmarks.
*/
netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
i, priv->rx_queue[i]->rx_ring_size);
for (i = 0; i < priv->num_tx_queues; i++)
netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
i, priv->tx_queue[i]->tx_ring_size);
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
struct netdev_queue *txq;
return 0;
tx_queue = priv->tx_queue[i];
txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
netdev_tx_reset_queue(txq);
}
register_fail:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
unmap_group_regs(priv);
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
of_node_put(priv->phy_node);
of_node_put(priv->tbi_node);
free_gfar_dev(priv);
return err;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
if (rx_queue->rx_buff)
free_skb_rx_queue(rx_queue);
}
dma_free_coherent(priv->dev,
sizeof(struct txbd8) * priv->total_tx_ring_size +
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
}
static int gfar_remove(struct platform_device *ofdev)
void stop_gfar(struct net_device *dev)
{
struct gfar_private *priv = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
of_node_put(priv->phy_node);
of_node_put(priv->tbi_node);
unregister_netdev(priv->ndev);
struct gfar_private *priv = netdev_priv(dev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
netif_tx_stop_all_queues(dev);
unmap_group_regs(priv);
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
free_gfar_dev(priv);
smp_mb__before_atomic();
set_bit(GFAR_DOWN, &priv->state);
smp_mb__after_atomic();
return 0;
}
disable_napi(priv);
#ifdef CONFIG_PM
/* disable ints and gracefully shut down Rx/Tx DMA */
gfar_halt(priv);
static void __gfar_filer_disable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 temp;
phy_stop(dev->phydev);
temp = gfar_read(&regs->rctrl);
temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
gfar_write(&regs->rctrl, temp);
free_skb_resources(priv);
}
static void __gfar_filer_enable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 temp;
temp = gfar_read(&regs->rctrl);
temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
gfar_write(&regs->rctrl, temp);
}
/* Filer rules implementing wol capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
{
unsigned int i;
u32 rqfcr;
__gfar_filer_disable(priv);
/* clear the filer table, reject any packet by default */
rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
for (i = 0; i <= MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, rqfcr, 0);
i = 0;
if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
/* unicast packet, accept it */
struct net_device *ndev = priv->ndev;
/* get the default rx queue index */
u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
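/* Illustrative example (not part of the original source): for a
 * station address of 00:11:22:33:44:55, the two exact-match rules
 * written below compare 0x001122 against the DA-high (RQFCR_PID_DAH)
 * parser field and 0x334455 against the DA-low (RQFCR_PID_DAL) one,
 * so only unicast frames addressed to this interface are accepted.
 */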
u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
(ndev->dev_addr[1] << 8) |
ndev->dev_addr[2];
rqfcr = (qindex << 10) | RQFCR_AND |
RQFCR_CMP_EXACT | RQFCR_PID_DAH;
gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
dest_mac_addr = (ndev->dev_addr[3] << 16) |
(ndev->dev_addr[4] << 8) |
ndev->dev_addr[5];
rqfcr = (qindex << 10) | RQFCR_GPI |
RQFCR_CMP_EXACT | RQFCR_PID_DAL;
gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
}
__gfar_filer_enable(priv);
}
static void gfar_filer_restore_table(struct gfar_private *priv)
{
u32 rqfcr, rqfpr;
unsigned int i;
__gfar_filer_disable(priv);
for (i = 0; i <= MAX_FILER_IDX; i++) {
rqfcr = priv->ftp_rqfcr[i];
rqfpr = priv->ftp_rqfpr[i];
gfar_write_filer(priv, i, rqfcr, rqfpr);
}
__gfar_filer_enable(priv);
}
/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
static void gfar_start(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
/* Enable Rx hw queues */
/* Enable Rx/Tx hw queues */
gfar_write(&regs->rqueue, priv->rqueue);
gfar_write(&regs->tqueue, priv->tqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
@@ -1589,670 +1212,503 @@ static void gfar_start_wol_filer(struct gfar_private *priv)
/* Make sure we aren't stopped */
tempval = gfar_read(&regs->dmactrl);
tempval &= ~DMACTRL_GRS;
tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
/* Clear RHLT, so that the DMA starts polling now */
/* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
/* enable the Filer General Purpose Interrupt */
gfar_write(&regs->imask, IMASK_FGPI);
}
/* Enable Rx DMA */
/* Enable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval |= MACCFG1_RX_EN;
tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
}
static int gfar_suspend(struct device *dev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
u16 wol = priv->wol_opts;
gfar_ints_enable(priv);
if (!netif_running(ndev))
return 0;
netif_trans_update(priv->ndev); /* prevent tx timeout */
}
disable_napi(priv);
netif_tx_lock(ndev);
netif_device_detach(ndev);
netif_tx_unlock(ndev);
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
struct page *page;
dma_addr_t addr;
gfar_halt(priv);
page = dev_alloc_page();
if (unlikely(!page))
return false;
if (wol & GFAR_WOL_MAGIC) {
/* Enable interrupt on Magic Packet */
gfar_write(&regs->imask, IMASK_MAG);
addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rxq->dev, addr))) {
__free_page(page);
/* Enable Magic Packet mode */
tempval = gfar_read(&regs->maccfg2);
tempval |= MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
return false;
}
/* re-enable the Rx block */
tempval = gfar_read(&regs->maccfg1);
tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
rxb->dma = addr;
rxb->page = page;
rxb->page_offset = 0;
} else if (wol & GFAR_WOL_FILER_UCAST) {
gfar_filer_config_wol(priv);
gfar_start_wol_filer(priv);
return true;
}
} else {
phy_stop(ndev->phydev);
}
static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
struct gfar_private *priv = netdev_priv(rx_queue->ndev);
struct gfar_extra_stats *estats = &priv->extra_stats;
return 0;
netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
atomic64_inc(&estats->rx_alloc_err);
}
static int gfar_resume(struct device *dev)
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
int alloc_cnt)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
struct rxbd8 *bdp;
struct gfar_rx_buff *rxb;
int i;
if (wol & GFAR_WOL_MAGIC) {
/* Disable Magic Packet mode */
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
i = rx_queue->next_to_use;
bdp = &rx_queue->rx_bd_base[i];
rxb = &rx_queue->rx_buff[i];
} else if (wol & GFAR_WOL_FILER_UCAST) {
/* need to stop rx only, tx is already down */
gfar_halt(priv);
gfar_filer_restore_table(priv);
while (alloc_cnt--) {
/* try reuse page */
if (unlikely(!rxb->page)) {
if (unlikely(!gfar_new_page(rx_queue, rxb))) {
gfar_rx_alloc_err(rx_queue);
break;
}
}
} else {
phy_start(ndev->phydev);
}
/* Setup the new RxBD */
gfar_init_rxbdp(rx_queue, bdp,
rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
gfar_start(priv);
/* Update to the next pointer */
bdp++;
rxb++;
netif_device_attach(ndev);
enable_napi(priv);
if (unlikely(++i == rx_queue->rx_ring_size)) {
i = 0;
bdp = rx_queue->rx_bd_base;
rxb = rx_queue->rx_buff;
}
}
return 0;
rx_queue->next_to_use = i;
rx_queue->next_to_alloc = i;
}
static int gfar_restore(struct device *dev)
static void gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
if (!netif_running(ndev)) {
netif_device_attach(ndev);
return 0;
}
gfar_init_bds(ndev);
struct gfar_private *priv = netdev_priv(ndev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
u32 __iomem *rfbptr;
int i, j;
gfar_mac_reset(priv);
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
/* Initialize some variables in our dev structure */
tx_queue->num_txbdfree = tx_queue->tx_ring_size;
tx_queue->dirty_tx = tx_queue->tx_bd_base;
tx_queue->cur_tx = tx_queue->tx_bd_base;
tx_queue->skb_curtx = 0;
tx_queue->skb_dirtytx = 0;
gfar_init_tx_rx_base(priv);
/* Initialize Transmit Descriptor Ring */
txbdp = tx_queue->tx_bd_base;
for (j = 0; j < tx_queue->tx_ring_size; j++) {
txbdp->lstatus = 0;
txbdp->bufPtr = 0;
txbdp++;
}
gfar_start(priv);
/* Set the last descriptor in the ring to indicate wrap */
txbdp--;
txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
TXBD_WRAP);
}
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
if (ndev->phydev)
phy_start(ndev->phydev);
rx_queue->next_to_clean = 0;
rx_queue->next_to_use = 0;
rx_queue->next_to_alloc = 0;
netif_device_attach(ndev);
enable_napi(priv);
/* make sure next_to_clean != next_to_use after this
* by leaving at least 1 unused descriptor
*/
gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
return 0;
rx_queue->rfbptr = rfbptr;
rfbptr += 2;
}
}
static const struct dev_pm_ops gfar_pm_ops = {
.suspend = gfar_suspend,
.resume = gfar_resume,
.freeze = gfar_suspend,
.thaw = gfar_resume,
.restore = gfar_restore,
};
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
void *vaddr;
dma_addr_t addr;
int i, j;
struct gfar_private *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
#define GFAR_PM_OPS (&gfar_pm_ops)
priv->total_tx_ring_size = 0;
for (i = 0; i < priv->num_tx_queues; i++)
priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
#else
priv->total_rx_ring_size = 0;
for (i = 0; i < priv->num_rx_queues; i++)
priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
#define GFAR_PM_OPS NULL
/* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev,
(priv->total_tx_ring_size *
sizeof(struct txbd8)) +
(priv->total_rx_ring_size *
sizeof(struct rxbd8)),
&addr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
#endif
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_bd_base = vaddr;
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
}
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 ecntrl;
/* Start the rx descriptor ring where the tx ring leaves off */
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->ndev = ndev;
rx_queue->dev = dev;
addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
ecntrl = gfar_read(&regs->ecntrl);
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_skbuff =
kmalloc_array(tx_queue->tx_ring_size,
sizeof(*tx_queue->tx_skbuff),
GFP_KERNEL);
if (!tx_queue->tx_skbuff)
goto cleanup;
if (ecntrl & ECNTRL_SGMII_MODE)
return PHY_INTERFACE_MODE_SGMII;
for (j = 0; j < tx_queue->tx_ring_size; j++)
tx_queue->tx_skbuff[j] = NULL;
}
if (ecntrl & ECNTRL_TBI_MODE) {
if (ecntrl & ECNTRL_REDUCED_MODE)
return PHY_INTERFACE_MODE_RTBI;
else
return PHY_INTERFACE_MODE_TBI;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
sizeof(*rx_queue->rx_buff),
GFP_KERNEL);
if (!rx_queue->rx_buff)
goto cleanup;
}
if (ecntrl & ECNTRL_REDUCED_MODE) {
if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
return PHY_INTERFACE_MODE_RMII;
}
else {
phy_interface_t interface = priv->interface;
gfar_init_bds(ndev);
/* This isn't autodetected right now, so it must
* be set by the device tree or platform code.
*/
if (interface == PHY_INTERFACE_MODE_RGMII_ID)
return PHY_INTERFACE_MODE_RGMII_ID;
return 0;
return PHY_INTERFACE_MODE_RGMII;
}
}
cleanup:
free_skb_resources(priv);
return -ENOMEM;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
return PHY_INTERFACE_MODE_GMII;
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
int err;
return PHY_INTERFACE_MODE_MII;
}
gfar_mac_reset(priv);
err = gfar_alloc_skb_resources(ndev);
if (err)
return err;
/* Initializes driver's PHY state, and attaches to the PHY.
* Returns 0 on success.
*/
static int init_phy(struct net_device *dev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface;
struct phy_device *phydev;
struct ethtool_eee edata;
gfar_init_tx_rx_base(priv);
linkmode_set_bit_array(phy_10_100_features_array,
ARRAY_SIZE(phy_10_100_features_array),
mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
smp_mb__before_atomic();
clear_bit(GFAR_DOWN, &priv->state);
smp_mb__after_atomic();
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
/* force link state update after mac reset */
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
interface = gfar_get_interface(dev);
phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
interface);
if (!phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
}
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
phy_start(ndev->phydev);
/* Add support for flow control */
phy_support_asym_pause(phydev);
enable_napi(priv);
/* disable EEE autoneg, EEE not supported by eTSEC */
memset(&edata, 0, sizeof(struct ethtool_eee));
phy_ethtool_set_eee(phydev, &edata);
netif_tx_wake_all_queues(ndev);
return 0;
}
/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the TBIPA register. We assume
* that the TBIPA register is valid. Either the MDIO bus code will set
* it to a value that doesn't conflict with other PHYs on the bus, or the
* value doesn't matter, as there are no other PHYs on the bus.
*/
static void gfar_configure_serdes(struct net_device *dev)
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *tbiphy;
struct net_device *ndev = priv->ndev;
struct phy_device *phydev = ndev->phydev;
u32 val = 0;
if (!priv->tbi_node) {
dev_warn(&dev->dev, "error: SGMII mode requires that the "
"device tree specify a tbi-handle\n");
return;
}
if (!phydev->duplex)
return val;
tbiphy = of_phy_find_device(priv->tbi_node);
if (!tbiphy) {
dev_err(&dev->dev, "error: Could not get TBI device\n");
return;
}
if (!priv->pause_aneg_en) {
if (priv->tx_pause_en)
val |= MACCFG1_TX_FLOW;
if (priv->rx_pause_en)
val |= MACCFG1_RX_FLOW;
} else {
u16 lcl_adv, rmt_adv;
u8 flowctrl;
/* get link partner capabilities */
rmt_adv = 0;
if (phydev->pause)
rmt_adv = LPA_PAUSE_CAP;
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
/* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
put_device(&tbiphy->mdio.dev);
return;
lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
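/* Illustrative note (not part of the original source): e.g. when both
 * sides advertise symmetric pause, mii_resolve_flowctrl_fdx() resolves
 * to FLOW_CTRL_TX | FLOW_CTRL_RX and both directions are enabled below.
 */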
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
if (flowctrl & FLOW_CTRL_RX)
val |= MACCFG1_RX_FLOW;
}
/* Single clk mode, mii mode off (for serdes communication) */
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, MII_ADVERTISE,
ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
ADVERTISE_1000XPSE_ASYM);
phy_write(tbiphy, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
BMCR_SPEED1000);
put_device(&tbiphy->mdio.dev);
return val;
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
u32 res;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct net_device *ndev = priv->ndev;
struct phy_device *phydev = ndev->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
/* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
return 0;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
* the same as bits 23-30, the eTSEC Rx is assumed to be idle
* and the Rx can be safely reset.
*/
res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
res &= 0x7f807f80;
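/* Illustrative note (not part of the original source): 0x7f807f80
 * keeps bits 7-14 of each 16-bit half of the register, so the halfword
 * comparison below is exactly the bits 7-14 == bits 23-30 idle check
 * described above.
 */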
if ((res & 0xffff) == (res >> 16))
return 1;
if (phydev->link) {
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
return 0;
}
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
tempval &= ~(MACCFG2_FULL_DUPLEX);
else
tempval |= MACCFG2_FULL_DUPLEX;
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
unsigned int timeout;
int stopped;
priv->oldduplex = phydev->duplex;
}
gfar_ints_disable(priv);
if (phydev->speed != priv->oldspeed) {
switch (phydev->speed) {
case 1000:
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
if (gfar_is_dma_stopped(priv))
return;
ecntrl &= ~(ECNTRL_R100);
break;
case 100:
case 10:
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
/* Reduced mode distinguishes
* between 10 and 100
*/
if (phydev->speed == SPEED_100)
ecntrl |= ECNTRL_R100;
else
ecntrl &= ~(ECNTRL_R100);
break;
default:
netif_warn(priv, link, priv->ndev,
"Ack! Speed (%d) is not 10/100/1000!\n",
phydev->speed);
break;
}
retry:
timeout = 1000;
while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
cpu_relax();
timeout--;
}
priv->oldspeed = phydev->speed;
}
if (!timeout)
stopped = gfar_is_dma_stopped(priv);
tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
tempval1 |= gfar_get_flowctrl_cfg(priv);
if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
!__gfar_is_rx_idle(priv))
goto retry;
}
/* Turn last free buffer recording on */
if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
for (i = 0; i < priv->num_rx_queues; i++) {
u32 bdp_dma;
/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
rx_queue = priv->rx_queue[i];
bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
gfar_write(rx_queue->rfbptr, bdp_dma);
}
/* Disable the Rx/Tx hw queues */
gfar_write(&regs->rqueue, 0);
gfar_write(&regs->tqueue, 0);
priv->tx_actual_en = 1;
}
mdelay(10);
if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
priv->tx_actual_en = 0;
gfar_halt_nodisable(priv);
gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
/* Disable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
if (!priv->oldlink)
priv->oldlink = 1;
} else if (priv->oldlink) {
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
}
if (netif_msg_link(priv))
phy_print_status(phydev);
}
void stop_gfar(struct net_device *dev)
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the phydev structure, and this
* function converts those variables into the appropriate
* register values, and can bring down the device if needed.
*/
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
netif_tx_stop_all_queues(dev);
smp_mb__before_atomic();
set_bit(GFAR_DOWN, &priv->state);
smp_mb__after_atomic();
if (unlikely(phydev->link != priv->oldlink ||
(phydev->link && (phydev->duplex != priv->oldduplex ||
phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
disable_napi(priv);
/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the TBIPA register. We assume
* that the TBIPA register is valid. Either the MDIO bus code will set
* it to a value that doesn't conflict with other PHYs on the bus, or the
* value doesn't matter, as there are no other PHYs on the bus.
*/
static void gfar_configure_serdes(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *tbiphy;
/* disable ints and gracefully shut down Rx/Tx DMA */
gfar_halt(priv);
if (!priv->tbi_node) {
dev_warn(&dev->dev, "error: SGMII mode requires that the "
"device tree specify a tbi-handle\n");
return;
}
phy_stop(dev->phydev);
tbiphy = of_phy_find_device(priv->tbi_node);
if (!tbiphy) {
dev_err(&dev->dev, "error: Could not get TBI device\n");
return;
}
free_skb_resources(priv);
}
/* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
put_device(&tbiphy->mdio.dev);
return;
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
struct txbd8 *txbdp;
struct gfar_private *priv = netdev_priv(tx_queue->dev);
int i, j;
/* Single clk mode, mii mode off (for serdes communication) */
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
txbdp = tx_queue->tx_bd_base;
phy_write(tbiphy, MII_ADVERTISE,
ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
ADVERTISE_1000XPSE_ASYM);
for (i = 0; i < tx_queue->tx_ring_size; i++) {
if (!tx_queue->tx_skbuff[i])
continue;
phy_write(tbiphy, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
BMCR_SPEED1000);
dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length),
DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
tx_queue->tx_skbuff[i] = NULL;
}
kfree(tx_queue->tx_skbuff);
tx_queue->tx_skbuff = NULL;
put_device(&tbiphy->mdio.dev);
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
/* Initializes driver's PHY state, and attaches to the PHY.
* Returns 0 on success.
*/
static int init_phy(struct net_device *dev)
{
int i;
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_eee edata;
struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
linkmode_set_bit_array(phy_10_100_features_array,
ARRAY_SIZE(phy_10_100_features_array),
mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
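/* Illustrative note (not part of the original source): at this point
 * mask holds the 10/100 modes plus Autoneg and MII, and 1000baseT/Full
 * only on gigabit-capable eTSECs; the linkmode_and() below strips every
 * other mode from the attached PHY's supported list.
 */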
dev_kfree_skb(rx_queue->skb);
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
for (i = 0; i < rx_queue->rx_ring_size; i++) {
struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
interface);
if (!phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
}
rxbdp->lstatus = 0;
rxbdp->bufPtr = 0;
rxbdp++;
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
if (!rxb->page)
continue;
/* Remove any features not supported by the controller */
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
dma_unmap_page(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
/* Add support for flow control */
phy_support_asym_pause(phydev);
rxb->page = NULL;
}
/* disable EEE autoneg, EEE not supported by eTSEC */
memset(&edata, 0, sizeof(struct ethtool_eee));
phy_ethtool_set_eee(phydev, &edata);
kfree(rx_queue->rx_buff);
rx_queue->rx_buff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
* Then free tx_skbuff and rx_skbuff
*/
static void free_skb_resources(struct gfar_private *priv)
{
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
struct netdev_queue *txq;
tx_queue = priv->tx_queue[i];
txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
netdev_tx_reset_queue(txq);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
if (rx_queue->rx_buff)
free_skb_rx_queue(rx_queue);
}
dma_free_coherent(priv->dev,
sizeof(struct txbd8) * priv->total_tx_ring_size +
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
}
void gfar_start(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
/* Enable Rx/Tx hw queues */
gfar_write(&regs->rqueue, priv->rqueue);
gfar_write(&regs->tqueue, priv->tqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
tempval |= DMACTRL_INIT_SETTINGS;
gfar_write(&regs->dmactrl, tempval);
/* Make sure we aren't stopped */
tempval = gfar_read(&regs->dmactrl);
tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
/* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
}
/* Enable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
gfar_ints_enable(priv);
netif_trans_update(priv->ndev); /* prevent tx timeout */
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
free_irq(gfar_irq(grp, TX)->irq, grp);
free_irq(gfar_irq(grp, RX)->irq, grp);
free_irq(gfar_irq(grp, ER)->irq, grp);
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
struct gfar_private *priv = grp->priv;
struct net_device *dev = priv->ndev;
int err;
/* If the device has multiple interrupts, register for
* them. Otherwise, only register for the one
*/
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
gfar_irq(grp, ER)->irq);
goto err_irq_fail;
}
enable_irq_wake(gfar_irq(grp, ER)->irq);
err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
gfar_irq(grp, TX)->irq);
goto tx_irq_fail;
}
err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
gfar_irq(grp, RX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
gfar_irq(grp, RX)->irq);
goto rx_irq_fail;
}
enable_irq_wake(gfar_irq(grp, RX)->irq);
} else {
err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
gfar_irq(grp, TX)->irq);
goto err_irq_fail;
}
enable_irq_wake(gfar_irq(grp, TX)->irq);
}
return 0;
rx_irq_fail:
free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
return err;
}
static void gfar_free_irq(struct gfar_private *priv)
{
int i;
/* Free the IRQs */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
for (i = 0; i < priv->num_grps; i++)
free_grp_irqs(&priv->gfargrp[i]);
} else {
for (i = 0; i < priv->num_grps; i++)
free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
&priv->gfargrp[i]);
}
}
static int gfar_request_irq(struct gfar_private *priv)
{
int err, i, j;
for (i = 0; i < priv->num_grps; i++) {
err = register_grp_irqs(&priv->gfargrp[i]);
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
return err;
}
}
return 0;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
int err;
gfar_mac_reset(priv);
err = gfar_alloc_skb_resources(ndev);
if (err)
return err;
gfar_init_tx_rx_base(priv);
smp_mb__before_atomic();
clear_bit(GFAR_DOWN, &priv->state);
smp_mb__after_atomic();
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
/* force link state update after mac reset */
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
phy_start(ndev->phydev);
enable_napi(priv);
netif_tx_wake_all_queues(ndev);
return 0;
}
/* Called when something needs to use the ethernet device
* Returns 0 for success.
*/
static int gfar_enet_open(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
int err;
err = init_phy(dev);
if (err)
return err;
err = gfar_request_irq(priv);
if (err)
return err;
err = startup_gfar(dev);
if (err)
return err;
return err;
return 0;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
@@ -2583,22 +2039,6 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
/* Disconnect from the PHY */
phy_disconnect(dev->phydev);
gfar_free_irq(priv);
return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
......@@ -2660,22 +2100,101 @@ static void gfar_timeout(struct net_device *dev)
schedule_work(&priv->reset_task);
}
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
case HWTSTAMP_TX_ON:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
priv->hwts_tx_en = 1;
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
reset_gfar(netdev);
}
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
if (!priv->hwts_rx_en) {
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
config.flags = 0;
config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
config.rx_filter = (priv->hwts_rx_en ?
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return gfar_hwtstamp_set(dev, rq);
if (cmd == SIOCGHWTSTAMP)
return gfar_hwtstamp_get(dev, rq);
if (!phydev)
return -ENODEV;
return phy_mii_ioctl(phydev, rq, cmd);
}
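The SIOCSHWTSTAMP/SIOCGHWTSTAMP handling above follows the standard kernel timestamping control interface from <linux/net_tstamp.h>. As a minimal userspace sketch of the request that lands in gfar_hwtstamp_set() (the interface name "eth0" and the absent error handling are placeholders, not part of the driver):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tstamp(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;        /* fails with -ERANGE if the eTSEC has no timer */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* the driver coerces any Rx filter to ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder interface name */
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr); /* routed through gfar_ioctl() above */
}

Querying the current configuration takes the same path with SIOCGHWTSTAMP and gfar_hwtstamp_get().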
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
struct net_device *dev = tx_queue->dev;
struct netdev_queue *txq;
struct gfar_private *priv = netdev_priv(dev);
struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
struct sk_buff *skb;
int skb_dirtytx;
int tx_ring_size = tx_queue->tx_ring_size;
int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
int tqi = tx_queue->qindex;
unsigned int bytes_sent = 0;
u32 lstatus;
size_t buflen;
......@@ -2767,77 +2286,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
struct page *page;
dma_addr_t addr;
page = dev_alloc_page();
if (unlikely(!page))
return false;
addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rxq->dev, addr))) {
__free_page(page);
return false;
}
rxb->dma = addr;
rxb->page = page;
rxb->page_offset = 0;
return true;
}
static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
struct gfar_private *priv = netdev_priv(rx_queue->ndev);
struct gfar_extra_stats *estats = &priv->extra_stats;
netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
atomic64_inc(&estats->rx_alloc_err);
}
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
int alloc_cnt)
{
struct rxbd8 *bdp;
struct gfar_rx_buff *rxb;
int i;
i = rx_queue->next_to_use;
bdp = &rx_queue->rx_bd_base[i];
rxb = &rx_queue->rx_buff[i];
while (alloc_cnt--) {
/* try reuse page */
if (unlikely(!rxb->page)) {
if (unlikely(!gfar_new_page(rx_queue, rxb))) {
gfar_rx_alloc_err(rx_queue);
break;
}
}
/* Setup the new RxBD */
gfar_init_rxbdp(rx_queue, bdp,
rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
/* Update to the next pointer */
bdp++;
rxb++;
if (unlikely(++i == rx_queue->rx_ring_size)) {
i = 0;
bdp = rx_queue->rx_bd_base;
rxb = rx_queue->rx_buff;
}
}
rx_queue->next_to_use = i;
rx_queue->next_to_alloc = i;
}
static void count_errors(u32 lstatus, struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
......@@ -2875,7 +2323,7 @@ static void count_errors(u32 lstatus, struct net_device *ndev)
}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
static irqreturn_t gfar_receive(int irq, void *grp_id)
{
struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
unsigned long flags;
......@@ -3077,7 +2525,8 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
* until the budget/quota has been reached. Returns the number
* of frames handled
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
int rx_work_limit)
{
struct net_device *ndev = rx_queue->ndev;
struct gfar_private *priv = netdev_priv(ndev);
......@@ -3327,6 +2776,98 @@ static int gfar_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
struct gfar_priv_grp *gfargrp = grp_id;
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_private *priv = gfargrp->priv;
struct net_device *dev = priv->ndev;
/* Save ievent for future reference */
u32 events = gfar_read(&regs->ievent);
/* Clear IEVENT */
gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
/* Magic Packet is not an error. */
if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
(events & IEVENT_MAG))
events &= ~IEVENT_MAG;
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
netdev_dbg(dev,
"error interrupt (ievent=0x%08x imask=0x%08x)\n",
events, gfar_read(&regs->imask));
/* Update the error counters */
if (events & IEVENT_TXE) {
dev->stats.tx_errors++;
if (events & IEVENT_LC)
dev->stats.tx_window_errors++;
if (events & IEVENT_CRL)
dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
atomic64_inc(&priv->extra_stats.tx_underrun);
schedule_work(&priv->reset_task);
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
if (events & IEVENT_BSY) {
dev->stats.rx_over_errors++;
atomic64_inc(&priv->extra_stats.rx_bsy);
netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
gfar_read(&regs->rstat));
}
if (events & IEVENT_BABR) {
dev->stats.rx_errors++;
atomic64_inc(&priv->extra_stats.rx_babr);
netif_dbg(priv, rx_err, dev, "babbling RX error\n");
}
if (events & IEVENT_EBERR) {
atomic64_inc(&priv->extra_stats.eberr);
netif_dbg(priv, rx_err, dev, "bus error\n");
}
if (events & IEVENT_RXC)
netif_dbg(priv, rx_status, dev, "control frame\n");
if (events & IEVENT_BABT) {
atomic64_inc(&priv->extra_stats.tx_babt);
netif_dbg(priv, tx_err, dev, "babbling TX error\n");
}
return IRQ_HANDLED;
}
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
struct gfar_priv_grp *gfargrp = grp_id;
/* Save ievent for future reference */
u32 events = gfar_read(&gfargrp->regs->ievent);
/* Check for reception */
if (events & IEVENT_RX_MASK)
gfar_receive(irq, grp_id);
/* Check for transmit completion */
if (events & IEVENT_TX_MASK)
gfar_transmit(irq, grp_id);
/* Check for errors */
if (events & IEVENT_ERR_MASK)
gfar_error(irq, grp_id);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
......@@ -3363,44 +2904,154 @@ static void gfar_netpoll(struct net_device *dev)
}
#endif
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
if (unlikely(phydev->link != priv->oldlink ||
(phydev->link && (phydev->duplex != priv->oldduplex ||
phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
/* Clears each of the exact match registers to zero, so they
* don't interfere with normal reception
*/
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Update the hash table based on the current list of multicast
......@@ -3494,273 +3145,581 @@ static void gfar_set_multi(struct net_device *dev)
}
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits in the
 * hash index indicate which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
u32 result = ether_crc(ETH_ALEN, addr);
int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
u32 value = (1 << (31-whichbit));
tempval = gfar_read(priv->hash_regs[whichreg]);
tempval |= value;
gfar_write(priv->hash_regs[whichreg], tempval);
}
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
macptr += num*2;
/* For a station address of 0x12345678ABCD in transmission
 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
 * MACnADDR2 is set to 0x34120000.
 */
tempval = (addr[5] << 24) | (addr[4] << 16) |
(addr[3] << 8) | addr[2];
gfar_write(macptr, tempval);
tempval = (addr[1] << 24) | (addr[0] << 16);
gfar_write(macptr+1, tempval);
}
void gfar_mac_reset(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
/* Reset MAC layer */
gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
/* We need to delay at least 3 TX clocks */
udelay(3);
/* the soft reset bit is not self-resetting, so we need to
 * clear it before resuming normal operation
 */
gfar_write(&regs->maccfg1, 0);
udelay(3);
gfar_rx_offload_en(priv);
/* Initialize the max receive frame/buffer lengths */
gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
/* Initialize the Minimum Frame Length Register */
gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
 * and by checking RxBD[LG] and discarding larger than MAXFRM.
 */
if (gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
gfar_write(&regs->maccfg2, tempval);
/* Clear mac addr hash registers */
gfar_write(&regs->igaddr0, 0);
gfar_write(&regs->igaddr1, 0);
gfar_write(&regs->igaddr2, 0);
gfar_write(&regs->igaddr3, 0);
gfar_write(&regs->igaddr4, 0);
gfar_write(&regs->igaddr5, 0);
gfar_write(&regs->igaddr6, 0);
gfar_write(&regs->igaddr7, 0);
gfar_write(&regs->gaddr0, 0);
gfar_write(&regs->gaddr1, 0);
gfar_write(&regs->gaddr2, 0);
gfar_write(&regs->gaddr3, 0);
gfar_write(&regs->gaddr4, 0);
gfar_write(&regs->gaddr5, 0);
gfar_write(&regs->gaddr6, 0);
gfar_write(&regs->gaddr7, 0);
if (priv->extended_hash)
gfar_clear_exact_match(priv->ndev);
gfar_mac_rx_config(priv);
gfar_mac_tx_config(priv);
gfar_set_mac_address(priv->ndev);
gfar_set_multi(priv->ndev);
/* clear ievent and imask before configuring coalescing */
gfar_ints_disable(priv);
/* Configure the coalescing support */
gfar_configure_coalescing_all(priv);
}
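To make the gaddr indexing in gfar_set_hash_for_addr() above concrete, here is a standalone sketch of the same bit arithmetic for hash_width = 8 (a 256-entry table across gaddr0-7); the CRC value is a made-up placeholder, whereas the driver obtains it from ether_crc():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t result = 0xD5000000; /* placeholder CRC; top byte 0xD5 = 0b11010101 */
	int width = 8;
	uint8_t whichbit = (result >> (32 - width)) & 0x1f; /* low 5 hash bits -> 21 */
	uint8_t whichreg = result >> (32 - width + 5);      /* top 3 hash bits -> gaddr6 */
	uint32_t value = 1u << (31 - whichbit);             /* IBM bit 21 == 0x00000400 */

	printf("gaddr%u |= 0x%08x\n", whichreg, value);
	return 0;
}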
static void gfar_hw_init(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 attrs;
/* Stop the DMA engine now, in case it was running before
* (The firmware could have used it, and left it running).
*/
gfar_halt(priv);
gfar_mac_reset(priv);
/* Zero out the rmon mib registers if it has them */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
/* Mask off the CAM interrupts */
gfar_write(&regs->rmon.cam1, 0xffffffff);
gfar_write(&regs->rmon.cam2, 0xffffffff);
}
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
/* Set the extraction length and index */
attrs = ATTRELI_EL(priv->rx_stash_size) |
ATTRELI_EI(priv->rx_stash_index);
gfar_write(&regs->attreli, attrs);
/* Start with defaults, and add stashing
* depending on driver parameters
*/
attrs = ATTR_INIT_SETTINGS;
if (priv->bd_stash_en)
attrs |= ATTR_BDSTASH;
if (priv->rx_stash_size != 0)
attrs |= ATTR_BUFSTASH;
gfar_write(&regs->attr, attrs);
/* FIFO configs */
gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
/* Program the interrupt steering regs, only for MG devices */
if (priv->num_grps > 1)
gfar_write_isrg(priv);
}
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
.ndo_stop = gfar_close,
.ndo_change_mtu = gfar_change_mtu,
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
};
/* Set up the ethernet device structure, private data,
* and anything else we need before we start
*/
static int gfar_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
int err = 0, i;
err = gfar_of_init(ofdev, &dev);
if (err)
return err;
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
INIT_WORK(&priv->reset_task, gfar_reset_task);
platform_set_drvdata(ofdev, priv);
gfar_detect_errata(priv);
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
/* MTU range: 50 - 9586 */
dev->mtu = 1500;
dev->min_mtu = 50;
dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
dev->netdev_ops = &gfar_netdev_ops;
dev->ethtool_ops = &gfar_ethtool_ops;
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
if (priv->poll_mode == GFAR_SQ_POLLING) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx_sq, 2);
} else {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
gfar_poll_rx, GFAR_DEV_WEIGHT);
netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx, 2);
}
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
gfar_init_addr_hash_table(priv);
/* Insert receive time stamps into padding alignment bytes, and
* add 2 bytes of padding to ensure cpu alignment.
*/
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
priv->padding = 8 + DEFAULT_PADDING;
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
priv->tx_queue[i]->txic = DEFAULT_TXIC;
}
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
/* Always enable rx filer if available */
priv->rx_filer_enable =
(priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
/* use priority h/w tx queue scheduling for single queue devices */
if (priv->num_tx_queues == 1)
priv->prio_sched_en = 1;
set_bit(GFAR_DOWN, &priv->state);
gfar_hw_init(priv);
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
err = register_netdev(dev);
if (err) {
pr_err("%s: Cannot register net device, aborting\n", dev->name);
goto register_fail;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
priv->wol_supported |= GFAR_WOL_MAGIC;
if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
priv->rx_filer_enable)
priv->wol_supported |= GFAR_WOL_FILER_UCAST;
device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
struct gfar_priv_grp *grp = &priv->gfargrp[i];
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_tx");
sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_rx");
sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_er");
} else
strcpy(gfar_irq(grp, TX)->name, dev->name);
}
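For reference, the sprintf() pattern above composes per-group IRQ names of the form "<dev>_g<n>_tx|rx|er"; a tiny standalone sketch, with "eth0" and the group index as placeholders:

#include <stdio.h>

int main(void)
{
	char name[32];
	int i = 1; /* placeholder group index */

	sprintf(name, "%s%s%c%s", "eth0", "_g", '0' + i, "_tx");
	printf("%s\n", name); /* prints eth0_g1_tx */
	return 0;
}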
/* Initialize the filer table */
gfar_init_filer_table(priv);
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
/* Even more device info helps when determining which kernel
* provided which set of benchmarks.
*/
netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
i, priv->rx_queue[i]->rx_ring_size);
for (i = 0; i < priv->num_tx_queues; i++)
netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
i, priv->tx_queue[i]->tx_ring_size);
return 0;
register_fail:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
unmap_group_regs(priv);
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
of_node_put(priv->phy_node);
of_node_put(priv->tbi_node);
free_gfar_dev(priv);
return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
struct gfar_private *priv = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
of_node_put(priv->phy_node);
of_node_put(priv->tbi_node);
unregister_netdev(priv->ndev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
unmap_group_regs(priv);
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return 0;
}
#ifdef CONFIG_PM
static void __gfar_filer_disable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 temp;
temp = gfar_read(&regs->rctrl);
temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
gfar_write(&regs->rctrl, temp);
}
static void __gfar_filer_enable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 temp;
temp = gfar_read(&regs->rctrl);
temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
gfar_write(&regs->rctrl, temp);
}
/* Filer rules implementing wol capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
{
unsigned int i;
u32 rqfcr;
__gfar_filer_disable(priv);
/* clear the filer table, reject any packet by default */
rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
for (i = 0; i <= MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, rqfcr, 0);
i = 0;
if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
/* unicast packet, accept it */
struct net_device *ndev = priv->ndev;
/* get the default rx queue index */
u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
(ndev->dev_addr[1] << 8) |
ndev->dev_addr[2];
rqfcr = (qindex << 10) | RQFCR_AND |
RQFCR_CMP_EXACT | RQFCR_PID_DAH;
gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
dest_mac_addr = (ndev->dev_addr[3] << 16) |
(ndev->dev_addr[4] << 8) |
ndev->dev_addr[5];
rqfcr = (qindex << 10) | RQFCR_GPI |
RQFCR_CMP_EXACT | RQFCR_PID_DAL;
gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
}
__gfar_filer_enable(priv);
}
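The two gfar_write_filer() calls above split the 6-byte station address into 3-byte halves matched against the DAH and DAL filer properties. A standalone sketch of just that packing, with a placeholder MAC:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 }; /* placeholder */
	uint32_t dah = (mac[0] << 16) | (mac[1] << 8) | mac[2]; /* 0x00049f */
	uint32_t dal = (mac[3] << 16) | (mac[4] << 8) | mac[5]; /* 0x010203 */

	printf("DAH=0x%06x DAL=0x%06x\n", dah, dal);
	return 0;
}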
static void gfar_filer_restore_table(struct gfar_private *priv)
{
u32 rqfcr, rqfpr;
unsigned int i;
__gfar_filer_disable(priv);
for (i = 0; i <= MAX_FILER_IDX; i++) {
rqfcr = priv->ftp_rqfcr[i];
rqfpr = priv->ftp_rqfpr[i];
gfar_write_filer(priv, i, rqfcr, rqfpr);
}
__gfar_filer_enable(priv);
}
/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
/* Enable Rx hw queues */
gfar_write(&regs->rqueue, priv->rqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
tempval |= DMACTRL_INIT_SETTINGS;
gfar_write(&regs->dmactrl, tempval);
/* Make sure we aren't stopped */
tempval = gfar_read(&regs->dmactrl);
tempval &= ~DMACTRL_GRS;
gfar_write(&regs->dmactrl, tempval);
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
/* Clear RHLT, so that the DMA starts polling now */
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
/* enable the Filer General Purpose Interrupt */
gfar_write(&regs->imask, IMASK_FGPI);
}
/* Enable Rx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
}
static int gfar_suspend(struct device *dev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
disable_napi(priv);
netif_tx_lock(ndev);
netif_device_detach(ndev);
netif_tx_unlock(ndev);
gfar_halt(priv);
if (wol & GFAR_WOL_MAGIC) {
/* Enable interrupt on Magic Packet */
gfar_write(&regs->imask, IMASK_MAG);
/* Enable Magic Packet mode */
tempval = gfar_read(&regs->maccfg2);
tempval |= MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
/* re-enable the Rx block */
tempval = gfar_read(&regs->maccfg1);
tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
} else if (wol & GFAR_WOL_FILER_UCAST) {
gfar_filer_config_wol(priv);
gfar_start_wol_filer(priv);
} else {
phy_stop(ndev->phydev);
}
return 0;
}
static int gfar_resume(struct device *dev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
if (wol & GFAR_WOL_MAGIC) {
/* Disable Magic Packet mode */
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
} else if (wol & GFAR_WOL_FILER_UCAST) {
/* need to stop rx only, tx is already down */
gfar_halt(priv);
gfar_filer_restore_table(priv);
} else {
phy_start(ndev->phydev);
}
gfar_start(priv);
netif_device_attach(ndev);
enable_napi(priv);
return 0;
}
static int gfar_restore(struct device *dev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
if (!netif_running(ndev)) {
netif_device_attach(ndev);
return 0;
}
gfar_init_bds(ndev);
gfar_mac_reset(priv);
gfar_init_tx_rx_base(priv);
gfar_start(priv);
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
if (ndev->phydev)
phy_start(ndev->phydev);
netif_device_attach(ndev);
enable_napi(priv);
return 0;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
struct net_device *ndev = priv->ndev;
struct phy_device *phydev = ndev->phydev;
u32 val = 0;
if (!phydev->duplex)
return val;
if (!priv->pause_aneg_en) {
if (priv->tx_pause_en)
val |= MACCFG1_TX_FLOW;
if (priv->rx_pause_en)
val |= MACCFG1_RX_FLOW;
} else {
u16 lcl_adv, rmt_adv;
u8 flowctrl;
/* get link partner capabilities */
rmt_adv = 0;
if (phydev->pause)
rmt_adv = LPA_PAUSE_CAP;
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
if (flowctrl & FLOW_CTRL_RX)
val |= MACCFG1_RX_FLOW;
}
return val;
}
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct net_device *ndev = priv->ndev;
struct phy_device *phydev = ndev->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
if (phydev->link) {
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
tempval &= ~(MACCFG2_FULL_DUPLEX);
else
tempval |= MACCFG2_FULL_DUPLEX;
priv->oldduplex = phydev->duplex;
}
if (phydev->speed != priv->oldspeed) {
switch (phydev->speed) {
case 1000:
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
ecntrl &= ~(ECNTRL_R100);
break;
case 100:
case 10:
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
/* Reduced mode distinguishes
 * between 10 and 100
 */
if (phydev->speed == SPEED_100)
ecntrl |= ECNTRL_R100;
else
ecntrl &= ~(ECNTRL_R100);
break;
default:
netif_warn(priv, link, priv->ndev,
"Ack! Speed (%d) is not 10/100/1000!\n",
phydev->speed);
break;
}
priv->oldspeed = phydev->speed;
}
tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
tempval1 |= gfar_get_flowctrl_cfg(priv);
/* Turn last free buffer recording on */
if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
for (i = 0; i < priv->num_rx_queues; i++) {
u32 bdp_dma;
rx_queue = priv->rx_queue[i];
bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
gfar_write(rx_queue->rfbptr, bdp_dma);
}
priv->tx_actual_en = 1;
}
if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
priv->tx_actual_en = 0;
gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
if (!priv->oldlink)
priv->oldlink = 1;
} else if (priv->oldlink) {
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
}
if (netif_msg_link(priv))
phy_print_status(phydev);
}
static const struct dev_pm_ops gfar_pm_ops = {
.suspend = gfar_suspend,
.resume = gfar_resume,
.freeze = gfar_suspend,
.thaw = gfar_resume,
.restore = gfar_restore,
};
#define GFAR_PM_OPS (&gfar_pm_ops)
#else
#define GFAR_PM_OPS NULL
#endif
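For context, GFAR_PM_OPS is what the driver hands to the driver core; a minimal sketch of that wiring, assuming the usual layout of this driver's platform_driver definition (abbreviated here, not the full struct):

static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS, /* &gfar_pm_ops, or NULL without CONFIG_PM */
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};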
static const struct of_device_id gfar_match[] =
{
......@@ -67,8 +67,6 @@ struct ethtool_rx_list {
/* Number of bytes to align the rx bufs to */
#define RXBUF_ALIGNMENT 64
#define PHY_INIT_TIMEOUT 100000
#define DRV_NAME "gfar-enet"
extern const char gfar_driver_version[];
......@@ -88,10 +86,6 @@ extern const char gfar_driver_version[];
#define GFAR_RX_MAX_RING_SIZE 256
#define GFAR_TX_MAX_RING_SIZE 256
#define GFAR_MAX_FIFO_THRESHOLD 511
#define GFAR_MAX_FIFO_STARVE 511
#define GFAR_MAX_FIFO_STARVE_OFF 511
#define FBTHR_SHIFT 24
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
......@@ -109,9 +103,6 @@ extern const char gfar_driver_version[];
#define DEFAULT_FIFO_TX_THR 0x100
#define DEFAULT_FIFO_TX_STARVE 0x40
#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
#define DEFAULT_BD_STASH 1
#define DEFAULT_STASH_LENGTH 96
#define DEFAULT_STASH_INDEX 0
/* The number of Exact Match registers */
#define GFAR_EM_NUM 15
......@@ -139,15 +130,6 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_COALESCE 0
#define DEFAULT_RXCOUNT 0
#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
| SUPPORTED_10baseT_Full \
| SUPPORTED_100baseT_Half \
| SUPPORTED_100baseT_Full \
| SUPPORTED_Autoneg \
| SUPPORTED_MII)
#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
/* TBI register addresses */
#define MII_TBICON 0x11
......@@ -185,8 +167,6 @@ extern const char gfar_driver_version[];
#define ECNTRL_REDUCED_MII_MODE 0x00000004
#define ECNTRL_SGMII_MODE 0x00000002
#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
#define MINFLR_INIT_SETTINGS 0x00000040
/* Tqueue control */
......@@ -266,12 +246,6 @@ extern const char gfar_driver_version[];
#define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
#define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
#define skip_bd(bdp, stride, base, ring_size) ({ \
typeof(bdp) new_bd = (bdp) + (stride); \
(new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; })
#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_LFC 0x00004000
......@@ -385,11 +359,6 @@ extern const char gfar_driver_version[];
#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
/* Fifo management */
#define FIFO_TX_THR_MASK 0x01ff
#define FIFO_TX_STARVE_MASK 0x01ff
#define FIFO_TX_STARVE_OFF_MASK 0x01ff
/* Attribute fields */
/* This enables rx snooping for buffers and descriptors */
......@@ -1326,16 +1295,9 @@ static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
return bdp_dma;
}
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
void reset_gfar(struct net_device *dev);
void gfar_mac_reset(struct gfar_private *priv);
void gfar_halt(struct gfar_private *priv);
void gfar_start(struct gfar_private *priv);
void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
u32 regnum, u32 read);
void gfar_configure_coalescing_all(struct gfar_private *priv);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
......@@ -1348,13 +1310,6 @@ extern const struct ethtool_ops gfar_ethtool_ops;
#define RQFCR_PID_PORT_MASK 0xFFFF0000
#define RQFCR_PID_MAC_MASK 0xFF000000
struct gfar_mask_entry {
unsigned int mask; /* The mask value which is valid from start to end */
unsigned int start;
unsigned int end;
unsigned int block; /* Same block values indicate dependent entries */
};
/* Represents a receive filer table entry */
struct gfar_filer_entry {
u32 ctrl;
......@@ -45,19 +45,6 @@
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
static int gfar_gcoalesce(struct net_device *dev,
struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
/* extra stats */