Commit b57dfbc4 authored by David S. Miller

Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

parents 42fe95ca e8ef7f29
@@ -50,7 +50,7 @@ struct e1000_stats {
 	int stat_offset;
 };
-#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
+#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
 		      offsetof(struct e1000_adapter, m)
 static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_packets", E1000_STAT(stats.gprc) },
...
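Note on the change above: FIELD_SIZEOF() (as defined in linux/kernel.h at the time, later replaced by sizeof_field()) is just a named form of the null-pointer idiom the driver previously open-coded, giving the size of a struct member without needing an instance of the struct. A minimal, self-contained illustration (struct and member names here are made up, not the driver's):

#include <stdio.h>
#include <stddef.h>

/* same definition the kernel used at the time */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct example_adapter {		/* illustrative stand-in for e1000_adapter */
	unsigned long rx_packets;
	unsigned int flags;
};

int main(void)
{
	/* both expressions yield the member's size; the macro only names the idiom */
	printf("%zu %zu\n",
	       FIELD_SIZEOF(struct example_adapter, rx_packets),
	       sizeof(((struct example_adapter *)0)->rx_packets));
	printf("offset: %zu\n", offsetof(struct example_adapter, rx_packets));
	return 0;
}

The same substitution is made for IGB_STAT() and IXGB_STAT() further down.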
@@ -1195,6 +1195,14 @@ e1000_probe(struct pci_dev *pdev,
 	printk("%s\n", print_mac(mac, netdev->dev_addr));
 
+	if (adapter->hw.bus_type == e1000_bus_type_pci_express) {
+		DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
+			"longer be supported by this driver in the future.\n",
+			pdev->vendor, pdev->device);
+		DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
+			"driver instead.\n");
+	}
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
...
@@ -1055,23 +1055,6 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
 	}
 }
 
-static void e1000_release_manageability(struct e1000_adapter *adapter)
-{
-	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
-		struct e1000_hw *hw = &adapter->hw;
-		u32 manc = er32(MANC);
-
-		/* re-enable hardware interception of ARP */
-		manc |= E1000_MANC_ARP_EN;
-		manc &= ~E1000_MANC_EN_MNG2HOST;
-
-		/* don't explicitly have to mess with MANC2H since
-		 * MANC has an enable disable that gates MANC2H */
-		ew32(MANC, manc);
-	}
-}
-
 /**
  * @e1000_alloc_ring - allocate memory for a ring structure
  **/
@@ -1561,9 +1544,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
 		manc = er32(MANC);
 
-		/* disable hardware interception of ARP */
-		manc &= ~(E1000_MANC_ARP_EN);
-
 		/* enable receiving management packets to the host. this will probably
 		 * generate destination unreachable messages from the host OS, but
 		 * the packets will be handled on SMBUS */
@@ -1690,6 +1670,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	else
 		rctl |= E1000_RCTL_LPE;
 
+	/* Enable hardware CRC frame stripping */
+	rctl |= E1000_RCTL_SECRC;
+
 	/* Setup buffer sizes */
 	rctl &= ~E1000_RCTL_SZ_4096;
 	rctl |= E1000_RCTL_BSEX;
@@ -1755,9 +1738,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		/* Enable Packet split descriptors */
 		rctl |= E1000_RCTL_DTYP_PS;
 
-		/* Enable hardware CRC frame stripping */
-		rctl |= E1000_RCTL_SECRC;
-
 		psrctl |= adapter->rx_ps_bsize0 >>
 			E1000_PSRCTL_BSIZE0_SHIFT;
@@ -2008,7 +1988,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 	u16 mii_reg;
 
 	/* WoL is enabled */
-	if (!adapter->wol)
+	if (adapter->wol)
 		return;
 
 	/* non-copper PHY? */
@@ -2140,8 +2120,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		phy_data &= ~IGP02E1000_PM_SPD;
 		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
 	}
-
-	e1000_release_manageability(adapter);
 }
 
 int e1000e_up(struct e1000_adapter *adapter)
@@ -3487,8 +3465,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
-	e1000_release_manageability(adapter);
-
 	/* make sure adapter isn't asleep if manageability is enabled */
 	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
 		pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4054,8 +4030,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	flush_scheduled_work();
 
-	e1000_release_manageability(adapter);
-
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant. */
 	e1000_release_hw_control(adapter);
...
@@ -130,8 +130,8 @@ static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 static void gfar_configure_serdes(struct net_device *dev);
-extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value);
-extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum);
+extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
+extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
 #ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct napi_struct *napi, int budget);
 #endif
...
@@ -51,7 +51,7 @@
  * the local mdio pins, which may not be the same as system mdio bus, used for
  * controlling the external PHYs, for example.
  */
-int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
+int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
 			  int regnum, u16 value)
 {
 	/* Set the PHY address and the register address we want to write */
@@ -77,7 +77,7 @@ int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
  * and are always tied to the local mdio pins, which may not be the
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  */
-int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum)
+int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
 {
 	u16 value;
...
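The __iomem changes above are annotation-only: they let sparse warn when a pointer into device register space is dereferenced directly, and they generate no code. A hedged, generic sketch of the convention (not gianfar code; the driver has its own register accessors, and the right accessor depends on bus endianness):

#include <linux/io.h>

/* A pointer to memory-mapped registers is tagged __iomem; accesses must go
 * through the MMIO accessors rather than a plain C dereference. */
static inline u32 example_read_reg(void __iomem *base, unsigned long offset)
{
	return ioread32(base + offset);
}

static inline void example_write_reg(void __iomem *base, unsigned long offset, u32 val)
{
	iowrite32(val, base + offset);
}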
@@ -289,7 +289,6 @@ static void ax_bump(struct mkiss *ax)
 			*ax->rbuff &= ~0x20;
 		}
 	}
-	spin_unlock_bh(&ax->buflock);
 
 	count = ax->rcount;
@@ -297,17 +296,17 @@ static void ax_bump(struct mkiss *ax)
 		printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
 		       ax->dev->name);
 		ax->stats.rx_dropped++;
+		spin_unlock_bh(&ax->buflock);
 		return;
 	}
 
-	spin_lock_bh(&ax->buflock);
 	memcpy(skb_put(skb,count), ax->rbuff, count);
-	spin_unlock_bh(&ax->buflock);
 	skb->protocol = ax25_type_trans(skb, ax->dev);
 	netif_rx(skb);
 	ax->dev->last_rx = jiffies;
 	ax->stats.rx_packets++;
 	ax->stats.rx_bytes += count;
+	spin_unlock_bh(&ax->buflock);
 }
 
 static void kiss_unesc(struct mkiss *ax, unsigned char s)
...
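Taken together, the mkiss hunks above keep ax->buflock held from the point the receive buffer is inspected until its contents have been copied into the skb, releasing it explicitly on the allocation-failure path, instead of dropping and re-taking it around the memcpy(). A hedged sketch of the pattern being restored, with generic names rather than the driver's:

/* sketch only: copying out of a buffer shared with a softirq-context writer */
spin_lock_bh(&buflock);                  /* concurrent writer excluded from here on */
len = rbuf_len;                          /* length read under the lock */
skb = dev_alloc_skb(len);                /* GFP_ATOMIC allocation, safe under the lock */
if (skb == NULL) {
	stats.rx_dropped++;
	spin_unlock_bh(&buflock);        /* error path must still unlock */
	return;
}
memcpy(skb_put(skb, len), rbuf, len);    /* buffer cannot change mid-copy */
/* ... hand the skb to the stack, update counters ... */
spin_unlock_bh(&buflock);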
@@ -43,7 +43,7 @@ struct igb_stats {
 	int stat_offset;
 };
-#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
+#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
 		    offsetof(struct igb_adapter, m)
 static const struct igb_stats igb_gstrings_stats[] = {
 	{ "rx_packets", IGB_STAT(stats.gprc) },
...
@@ -606,9 +606,6 @@ static void igb_init_manageability(struct igb_adapter *adapter)
 		u32 manc2h = rd32(E1000_MANC2H);
 		u32 manc = rd32(E1000_MANC);
 
-		/* disable hardware interception of ARP */
-		manc &= ~(E1000_MANC_ARP_EN);
-
 		/* enable receiving management packets to the host */
 		/* this will probably generate destination unreachable messages
 		 * from the host OS, but the packets will be handled on SMBUS */
@@ -623,25 +620,6 @@
 	}
 }
 
-static void igb_release_manageability(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-
-	if (adapter->en_mng_pt) {
-		u32 manc = rd32(E1000_MANC);
-
-		/* re-enable hardware interception of ARP */
-		manc |= E1000_MANC_ARP_EN;
-		manc &= ~E1000_MANC_EN_MNG2HOST;
-
-		/* don't explicitly have to mess with MANC2H since
-		 * MANC has an enable disable that gates MANC2H */
-
-		/* XXX stop the hardware watchdog ? */
-		wr32(E1000_MANC, manc);
-	}
-}
-
 /**
  * igb_configure - configure the hardware for RX and TX
  * @adapter: private board structure
@@ -844,7 +822,6 @@ void igb_reset(struct igb_adapter *adapter)
 	igb_reset_adaptive(&adapter->hw);
 	adapter->hw.phy.ops.get_phy_info(&adapter->hw);
-	igb_release_manageability(adapter);
 }
 
 /**
@@ -1178,9 +1155,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 	flush_scheduled_work();
 
-	igb_release_manageability(adapter);
-
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant. */
 	igb_release_hw_control(adapter);
@@ -3955,8 +3929,6 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
-	igb_release_manageability(adapter);
-
 	/* make sure adapter isn't asleep if manageability is enabled */
 	if (adapter->en_mng_pt) {
 		pci_enable_wake(pdev, PCI_D3hot, 1);
...
@@ -49,7 +49,7 @@ struct ixgb_stats {
 	int stat_offset;
 };
-#define IXGB_STAT(m) sizeof(((struct ixgb_adapter *)0)->m), \
+#define IXGB_STAT(m) FIELD_SIZEOF(struct ixgb_adapter, m), \
 		     offsetof(struct ixgb_adapter, m)
 static struct ixgb_stats ixgb_gstrings_stats[] = {
 	{"rx_packets", IXGB_STAT(net_stats.rx_packets)},
...
@@ -220,7 +220,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 		tx_ring->stats.bytes += tx_buffer_info->length;
 		if (cleaned) {
 			struct sk_buff *skb = tx_buffer_info->skb;
-#ifdef NETIF_F_TSO
 			unsigned int segs, bytecount;
 			segs = skb_shinfo(skb)->gso_segs ?: 1;
 			/* multiply data chunks by size of headers */
@@ -228,10 +227,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 				      skb->len;
 			total_tx_packets += segs;
 			total_tx_bytes += bytecount;
-#else
-			total_tx_packets++;
-			total_tx_bytes += skb->len;
-#endif
 		}
 		ixgbe_unmap_and_free_tx_resource(adapter,
 						 tx_buffer_info);
@@ -1942,6 +1937,10 @@ static int ixgbe_open(struct net_device *netdev)
 	int err;
 	u32 num_rx_queues = adapter->num_rx_queues;
 
+	/* disallow open during test */
+	if (test_bit(__IXGBE_TESTING, &adapter->state))
+		return -EBUSY;
+
 try_intr_reinit:
 	/* allocate transmit descriptors */
 	err = ixgbe_setup_all_tx_resources(adapter);
@@ -2278,11 +2277,29 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 				    IXGBE_ADVTXD_DTYP_CTXT);
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb->protocol == htons(ETH_P_IP))
+			switch (skb->protocol) {
+			case __constant_htons(ETH_P_IP):
 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+					type_tucmd_mlhl |=
+						IXGBE_ADVTXD_TUCMD_L4T_TCP;
+				break;
 
-			if (skb->sk->sk_protocol == IPPROTO_TCP)
-				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			case __constant_htons(ETH_P_IPV6):
+				/* XXX what about other V6 headers?? */
+				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+					type_tucmd_mlhl |=
+						IXGBE_ADVTXD_TUCMD_L4T_TCP;
+				break;
+
+			default:
+				if (unlikely(net_ratelimit())) {
+					DPRINTK(PROBE, WARNING,
+						"partial checksum but proto=%x!\n",
+						skb->protocol);
+				}
+				break;
+			}
 		}
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
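One detail in the hunk above: skb->protocol carries the ethertype in network byte order, and the case labels use __constant_htons() because a case label must be an integer constant expression (at the time plain htons() was not guaranteed to qualify there). A small, hedged userspace illustration of comparing against byte-swapped ethertype constants (values per the Ethernet spec, names illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define EXAMPLE_ETH_P_IP    0x0800	/* IPv4 ethertype, host order */
#define EXAMPLE_ETH_P_IPV6  0x86DD	/* IPv6 ethertype, host order */

/* the wire/skb value is big-endian, so compare it against htons() of the constant */
static const char *classify(uint16_t proto_be)
{
	if (proto_be == htons(EXAMPLE_ETH_P_IP))
		return "IPv4";
	if (proto_be == htons(EXAMPLE_ETH_P_IPV6))
		return "IPv6";
	return "other";
}

int main(void)
{
	uint16_t wire_proto = htons(EXAMPLE_ETH_P_IP);	/* as it would appear in skb->protocol */
	printf("%s\n", classify(wire_proto));
	return 0;
}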
@@ -2778,6 +2795,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	         hw->mac.type, hw->phy.type,
 	         (part_num >> 8), (part_num & 0xff));
 
+	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
+		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
+			 "this card is not sufficient for optimal "
+			 "performance.\n");
+		dev_warn(&pdev->dev, "For optimal performance a x8 "
+			 "PCI-Express slot is required.\n");
+	}
+
 	/* reset the hardware with the new settings */
 	ixgbe_start_hw(hw);
...
@@ -559,8 +559,16 @@ static int mhz_setup(struct pcmcia_device *link)
 
     /* Read the station address from the CIS. It is stored as the last
        (fourth) string in the Version 1 Version/ID tuple. */
-    if (link->prod_id[3]) {
-	station_addr = link->prod_id[3];
+    tuple->DesiredTuple = CISTPL_VERS_1;
+    if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
+	rc = -1;
+	goto free_cfg_mem;
+    }
+    /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
+    if (next_tuple(link, tuple, parse) != CS_SUCCESS)
+	first_tuple(link, tuple, parse);
+    if (parse->version_1.ns > 3) {
+	station_addr = parse->version_1.str + parse->version_1.ofs[3];
 	if (cvt_ascii_address(dev, station_addr) == 0) {
 	    rc = 0;
 	    goto free_cfg_mem;
...
@@ -36,6 +36,7 @@
 #include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -297,18 +298,11 @@ static void tsi108_check_phy(struct net_device *dev)
 	u32 speed;
 	unsigned long flags;
 
-	/* Do a dummy read, as for some reason the first read
-	 * after a link becomes up returns link down, even if
-	 * it's been a while since the link came up.
-	 */
-
 	spin_lock_irqsave(&phy_lock, flags);
 
 	if (!data->phy_ok)
 		goto out;
 
-	tsi108_read_mii(data, MII_BMSR);
-
 	duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
 	data->init_media = 0;
@@ -345,22 +339,21 @@
 			TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
 			TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+		}
 
 		if (data->link_up == 0) {
 			/* The manual says it can take 3-4 usecs for the speed change
 			 * to take effect.
 			 */
 			udelay(5);
 
 			spin_lock(&data->txlock);
 			if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
 				netif_wake_queue(dev);
 
 			data->link_up = 1;
 			spin_unlock(&data->txlock);
-		}
 		}
 	} else {
 		if (data->link_up == 1) {
 			netif_stop_queue(dev);
@@ -1274,12 +1267,11 @@ static void tsi108_init_phy(struct net_device *dev)
 	 * PHY_STAT register before the link up status bit is set.
 	 */
 
-	data->link_up = 1;
+	data->link_up = 0;
 
 	while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
 		 BMSR_LSTATUS)) {
 		if (i++ > (MII_READ_DELAY / 10)) {
-			data->link_up = 0;
 			break;
 		}
 		spin_unlock_irqrestore(&phy_lock, flags);
@@ -1287,6 +1279,7 @@ static void tsi108_init_phy(struct net_device *dev)
 		spin_lock_irqsave(&phy_lock, flags);
 	}
 
+	data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
 	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
 	data->phy_ok = 1;
 	data->init_media = 1;
@@ -1527,12 +1520,46 @@ static void tsi108_init_mac(struct net_device *dev)
 	TSI_WRITE(TSI108_EC_INTMASK, ~0);
 }
 
+static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&data->txlock, flags);
+	rc = mii_ethtool_gset(&data->mii_if, cmd);
+	spin_unlock_irqrestore(&data->txlock, flags);
+
+	return rc;
+}
+
+static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&data->txlock, flags);
+	rc = mii_ethtool_sset(&data->mii_if, cmd);
+	spin_unlock_irqrestore(&data->txlock, flags);
+
+	return rc;
+}
+
 static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
 	return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
 }
 
+static const struct ethtool_ops tsi108_ethtool_ops = {
+	.get_link	= ethtool_op_get_link,
+	.get_settings	= tsi108_get_settings,
+	.set_settings	= tsi108_set_settings,
+};
+
 static int
 tsi108_init_one(struct platform_device *pdev)
 {
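For reference, .get_link above is wired to the stock helper rather than a driver routine: ethtool_op_get_link just reports the carrier state the driver already maintains through netif_carrier_on()/netif_carrier_off(), roughly:

/* approximate shape of the generic helper, shown for illustration only */
u32 ethtool_op_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev);
}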
@@ -1584,7 +1611,6 @@ tsi108_init_one(struct platform_device *pdev)
 	data->mii_if.phy_id = einfo->phy;
 	data->mii_if.phy_id_mask = 0x1f;
 	data->mii_if.reg_num_mask = 0x1f;
-	data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
 	data->phy = einfo->phy;
 	data->phy_type = einfo->phy_type;
@@ -1598,6 +1624,7 @@
 	dev->get_stats = tsi108_get_stats;
 	netif_napi_add(dev, &data->napi, tsi108_poll, 64);
 	dev->do_ioctl = tsi108_do_ioctl;
+	dev->ethtool_ops = &tsi108_ethtool_ops;
 
 	/* Apparently, the Linux networking code won't use scatter-gather
 	 * if the hardware doesn't do checksums. However, it's faster
@@ -1629,6 +1656,7 @@
 		goto register_fail;
 	}
 
+	platform_set_drvdata(pdev, dev);
 	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n",
 	       dev->name, print_mac(mac, dev->dev_addr));
 #ifdef DEBUG
...