Commit a20da984 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ixgbe, igb and e1000e. The majority of the
changes are against igb.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ce43b03e ed83da12
......@@ -26,8 +26,7 @@
*******************************************************************************/
/*
* 80003ES2LAN Gigabit Ethernet Controller (Copper)
/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
*/
......@@ -80,7 +79,8 @@
1 = 50-80M
2 = 80-110M
3 = 110-140M
4 = >140M */
4 = >140M
*/
/* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
......@@ -95,8 +95,7 @@
/* In-Band Control Register (Page 194, Register 18) */
#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
/*
* A table for the GG82563 cable length where the range is defined
/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
*/
......@@ -183,8 +182,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/*
* Added to a constant, "size" becomes the left-shift value
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
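To make the comment above concrete: the EECD size field is added to a base shift so that the sum can be used directly as a left-shift to obtain the NVM word count. A standalone sketch follows; the value 6 for NVM_WORD_SIZE_BASE_SHIFT and the final "1 << size" step are assumptions, since only the "size += NVM_WORD_SIZE_BASE_SHIFT" line appears in this hunk.

#include <stdio.h>

/* Standalone sketch of the shift arithmetic described in the comment above.
 * NVM_WORD_SIZE_BASE_SHIFT = 6 and the "1 << size" step are assumptions;
 * only the "size += NVM_WORD_SIZE_BASE_SHIFT" line is part of the hunk. */
#define NVM_WORD_SIZE_BASE_SHIFT 6

int main(void)
{
	unsigned int field;

	for (field = 0; field <= 3; field++) {
		unsigned int size = field + NVM_WORD_SIZE_BASE_SHIFT;

		printf("EECD size field %u -> %u words\n", field, 1u << size);
	}
	return 0;
}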
......@@ -375,8 +373,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
* Firmware currently using resource (fwmask)
/* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000e_put_hw_semaphore(hw);
......@@ -442,8 +439,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
/*
* Use Alternative Page Select register to access
/* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
......@@ -457,8 +453,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
/*
* The "ready" bit in the MDIC register may be incorrectly set
/* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
......@@ -513,8 +508,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
/*
* Use Alternative Page Select register to access
/* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
......@@ -528,8 +522,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
/*
* The "ready" bit in the MDIC register may be incorrectly set
/* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
......@@ -618,8 +611,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
u16 phy_data;
bool link;
/*
* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
......@@ -657,8 +649,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
return ret_val;
if (!link) {
/*
* We didn't get link.
/* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1000e_phy_reset_dsp(hw);
......@@ -677,8 +668,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/*
* Resetting the phy means we need to verify the TX_CLK corresponds
/* Resetting the phy means we need to verify the TX_CLK corresponds
* to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
*/
phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
......@@ -687,8 +677,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
else
phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
/*
* In addition, we must re-enable CRS on Tx for both half and full
/* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
......@@ -766,8 +755,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
s32 ret_val;
u16 kum_reg_data;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
/* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
......@@ -899,8 +887,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
/*
* Clear all of the statistics registers (clear on read). It is
/* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
......@@ -945,8 +932,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
reg |= (1 << 28);
ew32(TARC(1), reg);
/*
* Disable IPv6 extension header parsing because some malformed
/* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
reg = er32(RFCTL);
......@@ -979,8 +965,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/*
* Options:
/* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
......@@ -1006,8 +991,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
break;
}
/*
* Options:
/* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
......@@ -1065,8 +1049,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/*
* Do not init these registers when the HW is in IAMT mode, since the
/* Do not init these registers when the HW is in IAMT mode, since the
* firmware will have already initialized them. We only initialize
* them if the HW is not in IAMT mode.
*/
......@@ -1087,8 +1070,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
}
/*
* Workaround: Disable padding in Kumeran interface in the MAC
/* Workaround: Disable padding in Kumeran interface in the MAC
* and in the PHY to avoid CRC errors.
*/
ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
......@@ -1121,8 +1103,7 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
/*
* Set the mac to wait the maximum time between each
/* Set the mac to wait the maximum time between each
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
......@@ -1352,8 +1333,7 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
/*
* If there's an alternate MAC address place it in RAR0
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
......
......@@ -185,8 +185,7 @@
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
/*
* Use byte values for the following shift parameters
/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
......@@ -242,8 +241,7 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
/*
* Bit definitions for the Management Data IO (MDIO) and Management Data
/* Bit definitions for the Management Data IO (MDIO) and Management Data
* Clock (MDC) pins in the Device Control Register.
*/
......@@ -424,8 +422,7 @@
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
/*
* This defines the bits that are set in the Interrupt Mask
/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
......@@ -475,8 +472,7 @@
/* 802.1q VLAN Packet Size */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address */
/*
/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
......@@ -723,8 +719,7 @@
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
/* Bit definitions for valid PHY IDs. */
/*
/* Bit definitions for valid PHY IDs.
* I = Integrated
* E = External
*/
......@@ -762,8 +757,7 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
/*
* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
* 0=Normal 10BASE-T Rx Threshold
*/
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
......@@ -779,14 +773,12 @@
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
/*
* Number of times we will attempt to autonegotiate before downshifting if we
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
/*
* Number of times we will attempt to autonegotiate before downshifting if we
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
......@@ -808,8 +800,7 @@
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
((reg) & MAX_PHY_REG_ADDRESS))
/*
* Bits...
/* Bits...
* 15-5: page
* 4-0: register offset
*/
......
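The PHY_REG() macro above packs a page number and a register offset into a single value using the "15-5: page, 4-0: register offset" split described in the comment. A small standalone illustration follows, assuming PHY_PAGE_SHIFT is 5 (which follows from that bit layout); the page 193 / register 16 example is the Kumeran Mode Control register mentioned earlier in this file.

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration of the page/offset packing described above.
 * PHY_PAGE_SHIFT of 5 follows from the "15-5: page, 4-0: register offset"
 * layout; MAX_PHY_REG_ADDRESS of 0x1F is the value defined in this file. */
#define MAX_PHY_REG_ADDRESS 0x1F
#define PHY_PAGE_SHIFT      5
#define PHY_REG(page, reg)  (((page) << PHY_PAGE_SHIFT) | \
                             ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	uint16_t packed = PHY_REG(193, 16);	/* Kumeran Mode Control: page 193, reg 16 */

	printf("packed=0x%04x page=%u reg=%u\n",
	       (unsigned int)packed,
	       (unsigned int)(packed >> PHY_PAGE_SHIFT),
	       (unsigned int)(packed & MAX_PHY_REG_ADDRESS));
	return 0;
}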
......@@ -161,8 +161,7 @@ struct e1000_info;
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
/*
* Count for polling __E1000_RESET condition every 10-20msec.
/* Count for polling __E1000_RESET condition every 10-20msec.
* Experimentation has shown the reset can take approximately 210msec.
*/
#define E1000_CHECK_RESET_COUNT 25
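A count of 25 at the 10-20 ms polling interval mentioned in the comment gives a worst-case wait of 250-500 ms, which comfortably covers the ~210 ms reset time. Below is a userspace sketch of that polling shape; reset_in_progress() is only a stand-in for testing the real __E1000_RESET state bit and is not part of the driver.

#include <stdio.h>
#include <unistd.h>

/* Sketch of the polling budget described above: up to 25 iterations of a
 * 10-20 ms sleep (250-500 ms total) covers the ~210 ms reset time.
 * reset_in_progress() is a placeholder for the real state-bit test. */
#define E1000_CHECK_RESET_COUNT 25

static int reset_in_progress(void)
{
	return 0;	/* pretend the reset already finished */
}

int main(void)
{
	int count = E1000_CHECK_RESET_COUNT;

	while (reset_in_progress() && count--)
		usleep(15000);	/* mid-point of the 10-20 ms window */

	printf("%s\n", reset_in_progress() ? "timed out" : "reset completed");
	return 0;
}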
......@@ -172,8 +171,7 @@ struct e1000_info;
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
/*
* in the case of WTHRESH, it appears at least the 82571/2 hardware
/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
* WTHRESH=4, so a setting of 5 gives the most efficient bus
* utilization but to avoid possible Tx stalls, set it to 1
......@@ -214,8 +212,7 @@ struct e1000_ps_page {
u64 dma; /* must be u64 - written to hw */
};
/*
* wrappers around a pointer to a socket buffer,
/* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer {
......@@ -305,9 +302,7 @@ struct e1000_adapter {
u16 tx_itr;
u16 rx_itr;
/*
* Tx
*/
/* Tx */
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
......@@ -340,9 +335,7 @@ struct e1000_adapter {
u32 tx_fifo_size;
u32 tx_dma_failed;
/*
* Rx
*/
/* Rx */
bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
int work_to_do) ____cacheline_aligned_in_smp;
void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
......
......@@ -214,7 +214,8 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
* for the switch() below to work */
* for the switch() below to work
*/
if ((spd & 1) || (dplx & ~1))
goto err_inval;
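The reflowed comment and the guard above reject any speed with the low bit set and any duplex value wider than one bit. A standalone illustration of which inputs pass that guard; SPEED_10/100/1000 and DUPLEX_HALF/FULL below use the standard ethtool numeric values (10, 100, 1000 and 0, 1), all of which satisfy it.

#include <stdio.h>

/* Mirrors the guard in the hunk above: the speed must have a clear low bit
 * and the duplex must fit in a single bit.  The numeric values are the
 * standard ethtool ones. */
#define SPEED_10    10
#define SPEED_100   100
#define SPEED_1000  1000
#define DUPLEX_HALF 0
#define DUPLEX_FULL 1

static int spd_dplx_valid(unsigned int spd, unsigned int dplx)
{
	return !((spd & 1) || (dplx & ~1u));
}

int main(void)
{
	printf("10/half: %d, 1000/full: %d, odd speed 33: %d, duplex 2: %d\n",
	       spd_dplx_valid(SPEED_10, DUPLEX_HALF),
	       spd_dplx_valid(SPEED_1000, DUPLEX_FULL),
	       spd_dplx_valid(33, DUPLEX_FULL),
	       spd_dplx_valid(SPEED_100, 2));
	return 0;
}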
......@@ -263,8 +264,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
/*
* When SoL/IDER sessions are active, autoneg/speed/duplex
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
......@@ -273,8 +273,7 @@ static int e1000_set_settings(struct net_device *netdev,
return -EINVAL;
}
/*
* MDI setting is only allowed when autoneg enabled because
/* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
......@@ -316,8 +315,7 @@ static int e1000_set_settings(struct net_device *netdev,
/* MDI-X => 2; MDI => 1; Auto => 3 */
if (ecmd->eth_tp_mdix_ctrl) {
/*
* fix up the value for auto (3 => 0) as zero is mapped
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
......@@ -454,8 +452,8 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
/* ethtool doesn't use anything past this point, so all this
* code is likely legacy junk for apps that may or may not
* exist */
* code is likely legacy junk for apps that may or may not exist
*/
if (hw->phy.type == e1000_phy_m88) {
e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (u32)phy_data; /* cable length */
......@@ -598,8 +596,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (ret_val)
goto out;
/*
* Update the checksum over the first part of the EEPROM if needed
/* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for applicable controllers
*/
if ((first_word <= NVM_CHECKSUM_REG) ||
......@@ -623,8 +620,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, e1000e_driver_version,
sizeof(drvinfo->version));
/*
* EEPROM image version # is reported as firmware version # for
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
......@@ -708,8 +704,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
e1000e_down(adapter);
/*
* We can't just free everything and then setup again, because the
/* We can't just free everything and then setup again, because the
* ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
* structs. First, attempt to allocate new resources...
*/
......@@ -813,8 +808,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 mask;
u32 wlock_mac = 0;
/*
* The status register is Read Only, so a write should fail.
/* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
switch (mac->type) {
......@@ -996,8 +990,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
/*
* Disable the interrupt to be reported in
/* Disable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was posted to the bus, the
......@@ -1015,8 +1008,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
}
/*
* Enable the interrupt to be reported in
/* Enable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was not posted to the bus, the
......@@ -1034,8 +1026,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
/*
* Disable the other interrupts to be reported in
/* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
* an interrupt was posted to the bus, the
......@@ -1378,8 +1369,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
hw->phy.type == e1000_phy_m88) {
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
} else {
/*
* Set the ILOS bit on the fiber Nic if half duplex link is
/* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
if ((er32(STATUS) & E1000_STATUS_FD) == 0)
......@@ -1388,8 +1378,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
/*
* Disable the receiver on the PHY so when a cable is plugged in, the
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
if (hw->phy.type == e1000_phy_m88)
......@@ -1408,8 +1397,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special requirements for 82571/82572 fiber adapters */
/*
* jump through hoops to make sure link is up because serdes
/* jump through hoops to make sure link is up because serdes
* link is hardwired up
*/
ctrl |= E1000_CTRL_SLU;
......@@ -1429,8 +1417,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl);
}
/*
* special write to serdes control register to enable SerDes analog
/* special write to serdes control register to enable SerDes analog
* loopback
*/
#define E1000_SERDES_LB_ON 0x410
......@@ -1448,8 +1435,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
u32 ctrlext = er32(CTRL_EXT);
u32 ctrl = er32(CTRL);
/*
* save CTRL_EXT to restore later, reuse an empty variable (unused
/* save CTRL_EXT to restore later, reuse an empty variable (unused
* on mac_type 80003es2lan)
*/
adapter->tx_fifo_head = ctrlext;
......@@ -1585,8 +1571,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ew32(RDT(0), rx_ring->count - 1);
/*
* Calculate the loop count based on the largest descriptor ring
/* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
......@@ -1627,8 +1612,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
l++;
if (l == rx_ring->count)
l = 0;
/*
* time + 20 msecs (200 msecs on 2.4) is more than
/* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
......@@ -1649,10 +1633,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
/*
* PHY loopback cannot be performed if SoL/IDER
* sessions are active
*/
/* PHY loopback cannot be performed if SoL/IDER sessions are active */
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
......@@ -1686,8 +1667,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
int i = 0;
hw->mac.serdes_has_link = false;
/*
* On some blade server designs, link establishment
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes
*/
do {
......@@ -1701,8 +1681,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(hw);
if (hw->mac.autoneg)
/*
* On some Phy/switch combinations, link establishment
/* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
msleep(5000);
......
......@@ -85,8 +85,7 @@ enum e1e_registers {
E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
/*
* Convenience macros
/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
......@@ -800,8 +799,7 @@ struct e1000_mac_operations {
s32 (*read_mac_addr)(struct e1000_hw *);
};
/*
* When to use various PHY register access functions:
/* When to use various PHY register access functions:
*
* Func Caller
* Function Does Does When to use
......
......@@ -143,8 +143,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
return hw->mac.tx_pkt_filtering;
}
/*
* If we can't read from the host interface for whatever
/* If we can't read from the host interface for whatever
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
......@@ -163,8 +162,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
E1000_MNG_DHCP_COOKIE_LENGTH);
/*
* If either the checksums or signature don't match, then
/* If either the checksums or signature don't match, then
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
......@@ -252,8 +250,7 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
/* Calculate length in DWORDs */
length >>= 2;
/*
* The device driver writes the relevant command block into the
/* The device driver writes the relevant command block into the
* ram area.
*/
for (i = 0; i < length; i++) {
......
......@@ -279,8 +279,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
e1e_flush();
udelay(1);
/*
* Read "Status Register" repeatedly until the LSB is cleared.
/* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
......@@ -321,8 +320,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u32 i, eerd = 0;
s32 ret_val = 0;
/*
* A check for invalid values: offset too large, too many words,
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
......@@ -364,8 +362,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 ret_val;
u16 widx = 0;
/*
* A check for invalid values: offset too large, too many words,
/* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
......@@ -393,8 +390,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
e1000_standby_nvm(hw);
/*
* Some SPI eeproms use the 8th address bit embedded in the
/* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
......@@ -461,8 +457,7 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
return ret_val;
}
/*
* if nvm_data is not ptr guard the PBA must be in legacy format which
/* if nvm_data is not ptr guard the PBA must be in legacy format which
* means pba_ptr is actually our second data word for the PBA number
* and we can decode it into an ascii string
*/
......
......@@ -32,11 +32,9 @@
#include "e1000.h"
/*
* This is the only thing that needs to be changed to adjust the
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
......@@ -49,12 +47,10 @@ module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
/*
* All parameters are treated the same, as an integer array of values.
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] \
......@@ -63,8 +59,7 @@ MODULE_PARM_DESC(copybreak,
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
/*
* Transmit Interrupt Delay in units of 1.024 microseconds
/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
......@@ -74,8 +69,7 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/*
* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
......@@ -84,8 +78,7 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/*
* Receive Interrupt Delay in units of 1.024 microseconds
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
......@@ -94,8 +87,7 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/*
* Receive Absolute Interrupt Delay in units of 1.024 microseconds
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
......@@ -103,8 +95,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/*
* Interrupt Throttle Rate (interrupts/sec)
/* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
*/
......@@ -113,8 +104,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define MAX_ITR 100000
#define MIN_ITR 100
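A standalone sketch of the InterruptThrottleRate validation implied by the comment above and by the option-handling hunk further down: the special values 0, 1 and 3 are taken as-is, anything else must fall inside [MIN_ITR, MAX_ITR], and invalid values fall back to the option default. The default of 3 used in the example call is only a placeholder.

#include <stdio.h>

/* Sketch only: special values pass through, in-range values pass through,
 * everything else falls back to the caller-supplied option default. */
#define MAX_ITR 100000
#define MIN_ITR 100

static unsigned int validate_itr(long itr, unsigned int opt_def)
{
	if (itr == 0 || itr == 1 || itr == 3)
		return (unsigned int)itr;
	if (itr >= MIN_ITR && itr <= MAX_ITR)
		return (unsigned int)itr;
	return opt_def;
}

int main(void)
{
	printf("%u %u %u\n",
	       validate_itr(1, 3), validate_itr(50000, 3), validate_itr(7, 3));
	return 0;
}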
/*
* IntMode (Interrupt Mode)
/* IntMode (Interrupt Mode)
*
* Valid Range: varies depending on kernel configuration & hardware support
*
......@@ -132,8 +122,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
#define MAX_INTMODE 2
#define MIN_INTMODE 0
/*
* Enable Smart Power Down of the PHY
/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
......@@ -141,8 +130,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
/*
* Enable Kumeran Lock Loss workaround
/* Enable Kumeran Lock Loss workaround
*
* Valid Range: 0, 1
*
......@@ -150,8 +138,7 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
*/
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
/*
* Write Protect NVM
/* Write Protect NVM
*
* Valid Range: 0, 1
*
......@@ -159,8 +146,7 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*/
E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
/*
* Enable CRC Stripping
/* Enable CRC Stripping
*
* Valid Range: 0, 1
*
......@@ -351,8 +337,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
/*
* Make sure a message is printed for non-special
/* Make sure a message is printed for non-special
* values. And in case of an invalid option, display
* warning, use default and go through itr/itr_setting
* adjustment logic below
......@@ -361,14 +346,12 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&adapter->itr, &opt, adapter))
adapter->itr = opt.def;
} else {
/*
* If no option specified, use default value and go
/* If no option specified, use default value and go
* through the logic below to adjust itr/itr_setting
*/
adapter->itr = opt.def;
/*
* Make sure a message is printed for non-special
/* Make sure a message is printed for non-special
* default values
*/
if (adapter->itr > 4)
......@@ -400,8 +383,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
opt.name);
break;
default:
/*
* Save the setting, because the dynamic bits
/* Save the setting, because the dynamic bits
* change itr.
*
* Clear the lower two bits because
......
......@@ -1028,6 +1028,15 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
/* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
*/
ret_val = igb_config_fc_after_link_up(hw);
if (ret_val)
hw_dbg("Error configuring flow control\n");
} else {
ret_val = igb_check_for_copper_link(hw);
}
......@@ -1345,7 +1354,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
**/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg;
u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
......@@ -1433,27 +1442,45 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
/*
* We force flow control to prevent the CTRL register values from being
* overwritten by the autonegotiated flow control values
*/
reg |= E1000_PCS_LCTL_FORCE_FCTRL;
if (pcs_autoneg) {
/* Set PCS register for autoneg */
reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
/* Disable force flow control for autoneg */
reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
/* Configure flow control advertisement for autoneg */
anadv_reg = rd32(E1000_PCS_ANADV);
anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
switch (hw->fc.requested_mode) {
case e1000_fc_full:
case e1000_fc_rx_pause:
anadv_reg |= E1000_TXCW_ASM_DIR;
anadv_reg |= E1000_TXCW_PAUSE;
break;
case e1000_fc_tx_pause:
anadv_reg |= E1000_TXCW_ASM_DIR;
break;
default:
break;
}
wr32(E1000_PCS_ANADV, anadv_reg);
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
/* Force flow control for forced link */
reg |= E1000_PCS_LCTL_FORCE_FCTRL;
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
wr32(E1000_PCS_LCTL, reg);
if (!igb_sgmii_active_82575(hw))
if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
igb_force_mac_fc(hw);
return ret_val;
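The pcs_autoneg branch above translates the requested flow-control mode into the TXCW PAUSE/ASM_DIR advertisement bits. A standalone mirror of that mapping follows; the enum names stand in for the driver's e1000_fc_* values, and the bit values match the E1000_TXCW_ASM_DIR/E1000_TXCW_PAUSE definitions added further down in this diff.

#include <stdio.h>

/* Standalone mirror of the advertisement setup in the serdes hunk above:
 * which TXCW bits get advertised for each requested flow-control mode. */
#define E1000_TXCW_ASM_DIR 0x00000100
#define E1000_TXCW_PAUSE   0x00000080

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static unsigned int fc_advertisement(enum fc_mode requested)
{
	unsigned int anadv = 0;

	switch (requested) {
	case fc_full:
	case fc_rx_pause:
		anadv |= E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE;
		break;
	case fc_tx_pause:
		anadv |= E1000_TXCW_ASM_DIR;
		break;
	default:
		break;
	}
	return anadv;
}

int main(void)
{
	printf("full/rx_pause -> 0x%x, tx_pause -> 0x%x, none -> 0x%x\n",
	       fc_advertisement(fc_full), fc_advertisement(fc_tx_pause),
	       fc_advertisement(fc_none));
	return 0;
}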
......@@ -1927,6 +1954,12 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
hw->dev_spec._82575.global_device_reset = false;
/* due to hw errata, global device reset doesn't always
* work on 82580
*/
if (hw->mac.type == e1000_82580)
global_device_reset = false;
/* Get current control state. */
ctrl = rd32(E1000_CTRL);
......
......@@ -431,6 +431,10 @@
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* Transmit Config Word */
#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
......@@ -539,6 +543,9 @@
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
......
......@@ -35,11 +35,42 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
*/
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
s32 ret_val = E1000_SUCCESS;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = rd32(E1000_SWSM);
wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
udelay(50);
}
if (i == timeout) {
/* Release semaphores */
igb_put_hw_semaphore(hw);
hw_dbg("Driver can't access the NVM\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
out:
return ret_val;
}
/**
* igb_acquire_nvm_i210 - Request for access to EEPROM
......@@ -67,6 +98,23 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
/**
* igb_put_hw_semaphore_i210 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*/
static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
swsm = rd32(E1000_SWSM);
swsm &= ~E1000_SWSM_SWESMBI;
wr32(E1000_SWSM, swsm);
}
/**
* igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
......@@ -137,60 +185,6 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
igb_put_hw_semaphore_i210(hw);
}
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
s32 ret_val = E1000_SUCCESS;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = rd32(E1000_SWSM);
wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
udelay(50);
}
if (i == timeout) {
/* Release semaphores */
igb_put_hw_semaphore(hw);
hw_dbg("Driver can't access the NVM\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
out:
return ret_val;
}
/**
* igb_put_hw_semaphore_i210 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
swsm = rd32(E1000_SWSM);
swsm &= ~E1000_SWSM_SWESMBI;
wr32(E1000_SWSM, swsm);
}
/**
* igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
......@@ -228,49 +222,6 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
return status;
}
/**
* igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow RAM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow RAM
*
* Writes data to Shadow RAM at offset using EEWR register.
*
* If e1000_update_nvm_checksum is not called after this function , the
* data will not be committed to FLASH and also Shadow RAM will most likely
* contain an invalid checksum.
*
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
**/
s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of forceful takeover procedure. However it is more efficient
* to write in bursts than synchronizing access for each word. */
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
} else {
status = E1000_ERR_SWFW_SYNC;
}
if (status != E1000_SUCCESS)
break;
}
return status;
}
/**
* igb_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
......@@ -328,6 +279,50 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
return ret_val;
}
/**
* igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow RAM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow RAM
*
* Writes data to Shadow RAM at offset using EEWR register.
*
* If e1000_update_nvm_checksum is not called after this function , the
* data will not be committed to FLASH and also Shadow RAM will most likely
* contain an invalid checksum.
*
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
*/
s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of forceful takeover procedure. However it is more efficient
* to write in bursts than synchronizing access for each word.
*/
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
} else {
status = E1000_ERR_SWFW_SYNC;
}
if (status != E1000_SUCCESS)
break;
}
return status;
}
/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* @hw: pointer to the HW structure
......@@ -350,16 +345,40 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_ID_LED_SETTINGS:
case NVM_INIT_CTRL_2:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
igb_read_invm_i211(hw, offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_COMPAT:
*data = ID_LED_DEFAULT_I210;
break;
case NVM_ID_LED_SETTINGS:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
}
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
......@@ -612,6 +631,28 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
return ret_val;
}
/**
* igb_pool_flash_update_done_i210 - Pool FLUDONE status.
* @hw: pointer to the HW structure
*
*/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_NVM;
u32 i, reg;
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
ret_val = E1000_SUCCESS;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
......@@ -641,28 +682,6 @@ s32 igb_update_flash_i210(struct e1000_hw *hw)
return ret_val;
}
/**
* igb_pool_flash_update_done_i210 - Pool FLUDONE status.
* @hw: pointer to the HW structure
*
**/
s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_NVM;
u32 i, reg;
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
ret_val = E1000_SUCCESS;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igb_valid_led_default_i210 - Verify a valid default LED config
* @hw: pointer to the HW structure
......
......@@ -84,4 +84,10 @@ enum E1000_INVM_STRUCTURE_TYPE {
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
/* NVM offset defaults for i211 device */
#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
#endif
......@@ -839,6 +839,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
......@@ -1040,6 +1041,129 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
}
}
/* Check for the case where we have SerDes media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if ((hw->phy.media_type == e1000_media_type_internal_serdes)
&& mac->autoneg) {
/* Read the PCS_LSTS and check to see if AutoNeg
* has completed.
*/
pcs_status_reg = rd32(E1000_PCS_LSTAT);
if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
hw_dbg("PCS Auto Neg has not completed.\n");
return ret_val;
}
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (PCS_ANADV) and the Auto_Negotiation Base
* Page Ability Register (PCS_LPAB) to determine how
* flow control was negotiated.
*/
pcs_adv_reg = rd32(E1000_PCS_ANADV);
pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
/* Two bits in the Auto Negotiation Advertisement Register
* (PCS_ANADV) and two bits in the Auto Negotiation Base
* Page Ability Register (PCS_LPAB) determine flow control
* for both the PHY and the link partner. The following
* table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
* 1999, describes these PAUSE resolution bits and how flow
* control is determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
*-------|---------|-------|---------|--------------------
* 0 | 0 | DC | DC | e1000_fc_none
* 0 | 1 | 0 | DC | e1000_fc_none
* 0 | 1 | 1 | 0 | e1000_fc_none
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
* 1 | 0 | 0 | DC | e1000_fc_none
* 1 | DC | 1 | DC | e1000_fc_full
* 1 | 1 | 0 | 0 | e1000_fc_none
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*
* Are both PAUSE bits set to 1? If so, this implies
* Symmetric Flow Control is enabled at both ends. The
* ASM_DIR bits are irrelevant per the spec.
*
* For Symmetric Flow Control:
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 1 | DC | 1 | DC | e1000_fc_full
*
*/
if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
/* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames.
*/
if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full;
hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
hw_dbg("Flow Control = Rx PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*/
else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
(pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
hw_dbg("Flow Control = Tx PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*/
else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
(pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
!(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
hw_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
/* Per the IEEE spec, at this point flow control
* should be disabled.
*/
hw->fc.current_mode = e1000_fc_none;
hw_dbg("Flow Control = NONE.\n");
}
/* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
ret_val = igb_force_mac_fc(hw);
if (ret_val) {
hw_dbg("Error forcing flow control settings\n");
return ret_val;
}
}
out:
return ret_val;
......
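The logic above reduces the PAUSE/ASM_DIR table in the comment to four outcomes. A standalone mirror of that decision chain, handy for sanity-checking the table; requested_full models the hw->fc.requested_mode == e1000_fc_full check.

#include <stdio.h>

/* Standalone resolution of the PAUSE/ASM_DIR table shown in the hunk above. */
enum fc { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static enum fc resolve_fc(int loc_pause, int loc_asm, int lp_pause, int lp_asm,
			  int requested_full)
{
	if (loc_pause && lp_pause)
		return requested_full ? fc_full : fc_rx_pause;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return fc_tx_pause;
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return fc_rx_pause;
	return fc_none;
}

int main(void)
{
	static const char *name[] = { "none", "rx_pause", "tx_pause", "full" };

	printf("1/1 vs 1/1, full requested -> %s\n",
	       name[resolve_fc(1, 1, 1, 1, 1)]);
	printf("0/1 vs 1/1 -> %s\n", name[resolve_fc(0, 1, 1, 1, 0)]);
	printf("1/1 vs 0/1 -> %s\n", name[resolve_fc(1, 1, 0, 1, 0)]);
	return 0;
}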
......@@ -438,7 +438,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
s32 ret_val;
s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/*
......@@ -448,22 +448,21 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
return ret_val;
}
ret_val = hw->nvm.ops.acquire(hw);
if (ret_val)
goto out;
msleep(10);
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
ret_val = igb_ready_nvm_eeprom(hw);
ret_val = nvm->ops.acquire(hw);
if (ret_val)
goto release;
return ret_val;
ret_val = igb_ready_nvm_eeprom(hw);
if (ret_val) {
nvm->ops.release(hw);
return ret_val;
}
igb_standby_nvm(hw);
......@@ -497,13 +496,10 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
usleep_range(1000, 2000);
nvm->ops.release(hw);
}
msleep(10);
release:
hw->nvm.ops.release(hw);
out:
return ret_val;
}
......
......@@ -42,6 +42,8 @@
struct igb_adapter;
#define E1000_PCS_CFG_IGN_SD 1
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
......
......@@ -1624,6 +1624,20 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
reg &= ~E1000_CONNSW_ENRGSRC;
wr32(E1000_CONNSW, reg);
/* Unset sigdetect for SERDES loopback on
* 82580 and i350 devices.
*/
switch (hw->mac.type) {
case e1000_82580:
case e1000_i350:
reg = rd32(E1000_PCS_CFG0);
reg |= E1000_PCS_CFG_IGN_SD;
wr32(E1000_PCS_CFG0, reg);
break;
default:
break;
}
/* Set PCS register for forced speed */
reg = rd32(E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
......
......@@ -47,23 +47,27 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
char buf[256];
int bytes_not_copied;
char *buf;
int len;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
len = snprintf(buf, sizeof(buf), "%s: %s\n",
adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
if (count < len)
buf = kasprintf(GFP_KERNEL, "%s: %s\n",
adapter->netdev->name,
ixgbe_dbg_reg_ops_buf);
if (!buf)
return -ENOMEM;
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
bytes_not_copied = copy_to_user(buffer, buf, len);
if (bytes_not_copied < 0)
return bytes_not_copied;
}
*ppos = len;
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
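Because the removed snprintf/copy_to_user lines and the added kasprintf/simple_read_from_buffer lines are interleaved in the rendering of this hunk, the resulting flow is hard to follow. Reassembled from the added lines only, the post-patch reg_ops read handler looks roughly like this (a sketch that only compiles inside the driver; the netdev_ops read handler further down follows the same pattern):

static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
				      size_t count, loff_t *ppos)
{
	struct ixgbe_adapter *adapter = filp->private_data;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	/* build the reply in a heap buffer instead of a fixed on-stack array */
	buf = kasprintf(GFP_KERNEL, "%s: %s\n",
			adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
	if (!buf)
		return -ENOMEM;

	if (count < strlen(buf)) {
		kfree(buf);
		return -ENOSPC;
	}

	/* copy_to_user plus manual *ppos bookkeeping replaced by the helper */
	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));

	kfree(buf);
	return len;
}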
......@@ -79,7 +83,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
int bytes_not_copied;
int len;
/* don't allow partial writes */
if (*ppos != 0)
......@@ -87,14 +91,15 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
return -ENOSPC;
bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
if (bytes_not_copied < 0)
return bytes_not_copied;
else if (bytes_not_copied < count)
count -= bytes_not_copied;
else
return -ENOSPC;
ixgbe_dbg_reg_ops_buf[count] = '\0';
len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf,
sizeof(ixgbe_dbg_reg_ops_buf)-1,
ppos,
buffer,
count);
if (len < 0)
return len;
ixgbe_dbg_reg_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
u32 reg, value;
......@@ -147,23 +152,27 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
char buf[256];
int bytes_not_copied;
char *buf;
int len;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
len = snprintf(buf, sizeof(buf), "%s: %s\n",
adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
if (count < len)
buf = kasprintf(GFP_KERNEL, "%s: %s\n",
adapter->netdev->name,
ixgbe_dbg_netdev_ops_buf);
if (!buf)
return -ENOMEM;
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
bytes_not_copied = copy_to_user(buffer, buf, len);
if (bytes_not_copied < 0)
return bytes_not_copied;
}
*ppos = len;
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
......@@ -179,7 +188,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
int bytes_not_copied;
int len;
/* don't allow partial writes */
if (*ppos != 0)
......@@ -187,15 +196,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
return -ENOSPC;
bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
buffer, count);
if (bytes_not_copied < 0)
return bytes_not_copied;
else if (bytes_not_copied < count)
count -= bytes_not_copied;
else
return -ENOSPC;
ixgbe_dbg_netdev_ops_buf[count] = '\0';
len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf,
sizeof(ixgbe_dbg_netdev_ops_buf)-1,
ppos,
buffer,
count);
if (len < 0)
return len;
ixgbe_dbg_netdev_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
......
......@@ -1338,26 +1338,29 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
if (hlen < sizeof(struct iphdr))
return hdr.network - data;
/* record next protocol */
/* record next protocol if header is present */
if (!hdr.ipv4->frag_off)
nexthdr = hdr.ipv4->protocol;
hdr.network += hlen;
} else if (protocol == __constant_htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
/* record next protocol */
nexthdr = hdr.ipv6->nexthdr;
hdr.network += sizeof(struct ipv6hdr);
hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
} else if (protocol == __constant_htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
return max_len;
hdr.network += FCOE_HEADER_LEN;
hlen = FCOE_HEADER_LEN;
#endif
} else {
return hdr.network - data;
}
/* relocate pointer to start of L4 header */
hdr.network += hlen;
/* finally sort out TCP/UDP */
if (nexthdr == IPPROTO_TCP) {
if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
......