Commit cb79abc7 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to igb and e1000e.

Akeem provides 3 igb patches.  The first resets the link when EEE is enabled
or disabled, if the link is up.  His second patch changes a register read
that normally stores the read value into a "just-read" so that the hardware
can accurately latch the register.  Lastly, he adds rcu_read_lock() to avoid
a possible race condition with the igb_update_stats() function.
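
The latter two changes follow a simple pattern, condensed here from the hunks
below (identifiers as in the igb driver):

    /* discard the read value so the hardware can latch the
     * clear-on-read register (was: icr = rd32(E1000_ICR);)
     */
    wr32(E1000_IMC, 0xffffffff);
    rd32(E1000_ICR);

    /* hold the RCU read lock while walking rings that may be
     * freed via kfree_rcu() in igb_free_q_vector()
     */
    rcu_read_lock();
    for (i = 0; i < adapter->num_rx_queues; i++) {
        struct igb_ring *ring = adapter->rx_ring[i];
        /* ... accumulate per-ring stats ... */
    }
    rcu_read_unlock();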

Mitch provides a fix for SR-IOV: MSI-X interrupts are required, so make sure
that MSI-X is enabled before allowing the user to turn on SR-IOV.
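
The check itself is small; roughly, in igb_enable_sriov() (see the hunk
below):

    if (!adapter->msix_entries) {
        /* SR-IOV needs MSI-X interrupts; refuse to enable VFs without it */
        err = -EPERM;
        goto out;
    }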

Alex's igb patch makes it so that we limit the lower bound for max_frame_size
to the size of a standard Ethernet frame.  This allows for feature parity
with other Intel-based drivers such as ixgbe.
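
In effect (see the igb_change_mtu() hunk below):

    /* adjust max frame to be at least the size of a standard frame */
    if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
        max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;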

Carolyn adds a SKU for a flashless i210 device and a fix so that
get_fw_version() works for all igb parts.  In addition, she has 2 igb patches
that refactor the NVM code to accommodate devices with no flash.  Lastly, she
adds code to check for failure of pci_disable_link_state() to attempt to
work around a problem found on some systems.
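
The flashless support hinges on a new helper that tests the FLASH detected
bit in EECD so the NVM init code can fall back to iNVM (OTP); roughly:

    bool igb_get_flash_presence_i210(struct e1000_hw *hw)
    {
        u32 eec = rd32(E1000_EECD);

        /* flashless i210 SKUs report no flash and use iNVM instead */
        return !!(eec & E1000_EECD_FLASH_DETECTED_I210);
    }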

Laura provides the remaining 2 igb patches.  The first removes the hard-coded
value for the size of the RETA indirection table and creates a macro for it
instead.  The second adds the ethtool callbacks necessary to change the RETA
indirection table from userspace.
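
The new ethtool hooks are thin wrappers around the table cached in the
adapter structure, for example:

    static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
    {
        return IGB_RETA_SIZE;    /* 128-entry indirection table */
    }

    static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
    {
        struct igb_adapter *adapter = netdev_priv(netdev);
        int i;

        for (i = 0; i < IGB_RETA_SIZE; i++)
            indir[i] = adapter->rss_indir_tbl[i];
        return 0;
    }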

Bruce fixes a whitespace issue in a recent commit and resolves a jiffies
comparison warning by using time_after().
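
Unlike a raw comparison, time_after() stays correct when jiffies wraps:

    /* was: if (jiffies >= (time + 20)) -- breaks on jiffies wraparound */
    if (time_after(jiffies, time + 20)) {
        ret_val = 14; /* error code for time out error */
        break;
    }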

Li provides a fix for e1000e to avoid a kernel crash on shutdown by adding
one more check in e1000e_shutdown().  The crash occurs because
e1000e_shutdown() tries to clear correctable errors on the upstream P2P
bridge, and in some cases there is no upstream P2P bridge.
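
The added check simply bails out of __e1000_shutdown() when no upstream
bridge is present:

    struct pci_dev *us_dev = pdev->bus->self;

    if (!us_dev)    /* e.g. device assigned to a KVM guest on power */
        return 0;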

v2:
 - fixed patch 11 conditional statement from < to <= based on feedback
   from Ben Hutchings
 - fixed patch 12 patch description (adding the commit summary) based
   on feedback from Sergei Shtylyov
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f8825669 22f8abaa
......@@ -1665,7 +1665,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
if (jiffies >= (time + 20)) {
if (time_after(jiffies, time + 20)) {
ret_val = 14; /* error code for time out error */
break;
}
......
......@@ -234,6 +234,7 @@ union e1000_rx_desc_extended {
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
......
......@@ -64,8 +64,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
......@@ -6001,11 +5999,18 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
*
* We don't have the associated upstream bridge while assigning
* the PCI device into guest. For example, the KVM on power is
* one of the cases.
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
u16 devctl;
if (!us_dev)
return 0;
pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
......@@ -6019,38 +6024,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return 0;
}
#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
/**
* e1000e_disable_aspm - Disable ASPM states
* @pdev: pointer to PCI device struct
* @state: bit-mask of ASPM states to disable
*
* Some devices *must* have certain ASPM states disabled per hardware errata.
**/
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
struct pci_dev *parent = pdev->bus->self;
u16 aspm_dis_mask = 0;
u16 pdev_aspmc, parent_aspmc;
switch (state) {
case PCIE_LINK_STATE_L0S:
case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
/* fall-through - can't have L1 without L0s */
case PCIE_LINK_STATE_L1:
aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
break;
default:
return;
}
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
if (parent) {
pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
&parent_aspmc);
parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
}
/* Nothing to do if the ASPM states to be disabled already are */
if (!(pdev_aspmc & aspm_dis_mask) &&
(!parent || !(parent_aspmc & aspm_dis_mask)))
return;
dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
(aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
"L0s" : "",
(aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
"L1" : "");
#ifdef CONFIG_PCIEASPM
pci_disable_link_state_locked(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
u16 aspm_ctl = 0;
if (state & PCIE_LINK_STATE_L0S)
aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & PCIE_LINK_STATE_L1)
aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
/* Double-check ASPM control. If not disabled by the above, the
* BIOS is preventing that from happening (or CONFIG_PCIEASPM is
* not enabled); override by writing PCI config space directly.
*/
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
if (!(aspm_dis_mask & pdev_aspmc))
return;
#endif
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
if (pdev->bus->self)
pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
aspm_ctl);
}
#endif
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
(state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
(state & PCIE_LINK_STATE_L1) ? "L1" : "");
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
__e1000e_disable_aspm(pdev, state);
if (parent)
pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
aspm_dis_mask);
}
#ifdef CONFIG_PM
......
......@@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
......@@ -250,7 +251,6 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = 15;
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
......@@ -273,63 +273,30 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
} else {
nvm->type = e1000_nvm_flash_hw;
}
/* NVM Function Pointers */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
nvm->ops.acquire = igb_acquire_nvm_82575;
nvm->ops.release = igb_release_nvm_82575;
nvm->ops.write = igb_write_nvm_spi;
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
/* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
break;
case e1000_i354:
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
nvm->ops.acquire = igb_acquire_nvm_82575;
nvm->ops.release = igb_release_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i210:
nvm->ops.validate = igb_validate_nvm_checksum_i210;
nvm->ops.update = igb_update_nvm_checksum_i210;
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_srrd_i210;
nvm->ops.write = igb_write_nvm_srwr_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
break;
case e1000_i211:
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_i211;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
nvm->ops.write = NULL;
break;
default:
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
nvm->ops.acquire = igb_acquire_nvm_82575;
nvm->ops.release = igb_release_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
break;
}
......@@ -516,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
case E1000_DEV_ID_I210_COPPER_FLASHLESS:
case E1000_DEV_ID_I210_SERDES_FLASHLESS:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
......@@ -601,6 +570,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* NVM initialization */
ret_val = igb_init_nvm_params_82575(hw);
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
ret_val = igb_init_nvm_params_i210(hw);
break;
default:
break;
}
if (ret_val)
goto out;
......@@ -1320,7 +1298,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
**/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
u32 ctrl, icr;
u32 ctrl;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
......@@ -1365,7 +1343,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
icr = rd32(E1000_ICR);
rd32(E1000_ICR);
/* Install any alternate MAC address into RAR0 */
ret_val = igb_check_alt_mac_addr(hw);
......@@ -2103,10 +2081,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
s32 ret_val = 0;
/* BH SW mailbox bit in SW_FW_SYNC */
u16 swmbsw_mask = E1000_SW_SYNCH_MB;
u32 ctrl, icr;
u32 ctrl;
bool global_device_reset = hw->dev_spec._82575.global_device_reset;
hw->dev_spec._82575.global_device_reset = false;
/* due to hw errata, global device reset doesn't always
......@@ -2165,7 +2142,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
icr = rd32(E1000_ICR);
rd32(E1000_ICR);
ret_val = igb_reset_mdicnfg_82580(hw);
if (ret_val)
......
......@@ -620,6 +620,7 @@
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
......@@ -627,6 +628,11 @@
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
/* Secure FLASH mode requires removing MSb */
#define E1000_I210_FW_PTR_MASK 0x7FFF
/* Firmware code revision field word offset*/
#define E1000_I210_FW_VER_OFFSET 328
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
......@@ -665,20 +671,26 @@
#define NVM_INIT_CTRL_4 0x0013
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
/* NVM version defines */
#define NVM_ETRACK_WORD 0x0042
#define NVM_ETRACK_HIWORD 0x0043
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
/* NVM version defines */
#define NVM_MAJOR_MASK 0xF000
#define NVM_MINOR_MASK 0x0FF0
#define NVM_BUILD_MASK 0x000F
#define NVM_IMAGE_ID_MASK 0x000F
#define NVM_COMB_VER_MASK 0x00FF
#define NVM_MAJOR_SHIFT 12
#define NVM_MINOR_SHIFT 4
#define NVM_COMB_VER_SHFT 8
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETRACK_SHIFT 16
#define NVM_ETRACK_VALID 0x8000
#define NVM_NEW_DEC_MASK 0x0F00
#define NVM_HEX_CONV 16
#define NVM_HEX_TENS 10
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT 6
......
......@@ -67,6 +67,8 @@ struct e1000_hw;
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
#define E1000_DEV_ID_I354_SGMII 0x1F41
......@@ -110,6 +112,7 @@ enum e1000_nvm_type {
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
e1000_nvm_invm,
e1000_nvm_flash_sw
};
......
......@@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* igb_read_invm_word_i210 - Reads OTP
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
* @data: pointer to the data read
*
* Reads 16-bit words from the OTP. Return error when the word is not
* stored in OTP.
**/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
u32 invm_dword;
u16 i;
u8 record_type, word_address;
for (i = 0; i < E1000_INVM_SIZE; i++) {
invm_dword = rd32(E1000_INVM_DATA_REG(i));
/* Get record type */
record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
break;
if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
if (word_address == address) {
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
hw_dbg("Read INVM Word 0x%02x = %x",
address, *data);
status = E1000_SUCCESS;
break;
}
}
}
if (status != E1000_SUCCESS)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
/**
* igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
* @words: number of words to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
u16 words __always_unused, u16 *data)
{
s32 ret_val = E1000_SUCCESS;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
ret_val = igb_read_invm_i211(hw, offset, &data[0]);
ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
&data[1]);
ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
&data[2]);
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
igb_read_invm_i211(hw, offset, data);
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_ID_LED_SETTINGS:
ret_val = igb_read_invm_i211(hw, (u8)offset, data);
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
......@@ -410,48 +454,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
return ret_val;
}
/**
* igb_read_invm_i211 - Reads OTP
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
* @data: pointer to the data read
*
* Reads 16-bit words from the OTP. Return error when the word is not
* stored in OTP.
**/
s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
{
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
u32 invm_dword;
u16 i;
u8 record_type, word_address;
for (i = 0; i < E1000_INVM_SIZE; i++) {
invm_dword = rd32(E1000_INVM_DATA_REG(i));
/* Get record type */
record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
break;
if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
if (word_address == (u8)address) {
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
hw_dbg("Read INVM Word 0x%02x = %x",
address, *data);
status = E1000_SUCCESS;
break;
}
}
}
if (status != E1000_SUCCESS)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
/**
* igb_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
......@@ -660,6 +662,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
return ret_val;
}
/**
* igb_get_flash_presence_i210 - Check if flash device is detected.
* @hw: pointer to the HW structure
*
**/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
u32 eec = 0;
bool ret_val = false;
eec = rd32(E1000_EECD);
if (eec & E1000_EECD_FLASH_DETECTED_I210)
ret_val = true;
return ret_val;
}
/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
......@@ -786,3 +805,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
/**
* igb_init_nvm_params_i210 - Init NVM func ptrs.
* @hw: pointer to the HW structure
**/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
s32 ret_val = 0;
struct e1000_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
/* NVM Function Pointers */
if (igb_get_flash_presence_i210(hw)) {
hw->nvm.type = e1000_nvm_flash_hw;
nvm->ops.read = igb_read_nvm_srrd_i210;
nvm->ops.write = igb_write_nvm_srwr_i210;
nvm->ops.validate = igb_validate_nvm_checksum_i210;
nvm->ops.update = igb_update_nvm_checksum_i210;
} else {
hw->nvm.type = e1000_nvm_invm;
nvm->ops.read = igb_read_invm_i210;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
return ret_val;
}
......@@ -35,20 +35,19 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
extern s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 data);
extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
......
......@@ -709,11 +709,16 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
**/
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
u16 fw_version;
u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
u8 q, hval, rem, result;
u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
/* basic eeprom version numbers and bits used vary by part and by tool
* used to create the nvm images. Check which data format we have.
*/
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
switch (hw->mac.type) {
case e1000_i211:
igb_read_invm_version(hw, fw_vers);
......@@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
case e1000_i354:
case e1000_i350:
case e1000_i210:
/* Use this format, unless EETRACK ID exists,
* then use alternate format
*/
if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
>> NVM_MAJOR_SHIFT;
fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
>> NVM_MINOR_SHIFT;
fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
goto etrack_id;
}
break;
default:
case e1000_i210:
if (!(igb_get_flash_presence_i210(hw))) {
igb_read_invm_version(hw, fw_vers);
return;
}
/* basic eeprom version numbers */
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
/* etrack id */
hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
switch (hw->mac.type) {
case e1000_i210:
case e1000_i354:
/* fall through */
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
if ((comb_offset != 0x0) &&
(comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
......@@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
((comb_verl << NVM_COMB_VER_SHFT)
| (comb_verh >> NVM_COMB_VER_SHFT));
(comb_verl << NVM_COMB_VER_SHFT)
| (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
break;
return;
}
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
>> NVM_MAJOR_SHIFT;
/* check for old style version format in newer images*/
if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
} else {
eeprom_verl = (fw_version & NVM_MINOR_MASK)
>> NVM_MINOR_SHIFT;
}
/* Convert minor value to hex before assigning to output struct
* Val to be converted will not be higher than 99, per tool output
*/
q = eeprom_verl / NVM_HEX_CONV;
hval = q * NVM_HEX_TENS;
rem = eeprom_verl % NVM_HEX_CONV;
result = hval + rem;
fw_vers->eep_minor = result;
etrack_id:
if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
}
return;
}
......@@ -44,6 +44,7 @@ struct e1000_fw_version {
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
u16 eep_build;
u8 invm_major;
u8 invm_minor;
......
......@@ -343,6 +343,8 @@ struct hwmon_buff {
};
#endif
#define IGB_RETA_SIZE 128
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
......@@ -444,6 +446,8 @@ struct igb_adapter {
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct i2c_client *i2c_client;
u32 rss_indir_tbl_init;
u8 rss_indir_tbl[IGB_RETA_SIZE];
};
#define IGB_FLAG_HAS_MSI (1 << 0)
......@@ -480,6 +484,7 @@ extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
extern void igb_write_rss_indir_tbl(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
......
......@@ -1335,13 +1335,24 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
*data = 0;
/* Validate eeprom on all parts but i211 */
if (adapter->hw.mac.type != e1000_i211) {
/* Validate eeprom on all parts but flashless */
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
if (igb_get_flash_presence_i210(hw)) {
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
}
break;
default:
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
break;
}
return *data;
}
......@@ -2672,7 +2683,9 @@ static int igb_set_eee(struct net_device *netdev,
igb_set_eee_i350(hw);
/* reset link */
if (!netif_running(netdev))
if (netif_running(netdev))
igb_reinit_locked(adapter);
else
igb_reset(adapter);
}
......@@ -2771,6 +2784,90 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
return IGB_RETA_SIZE;
}
static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
for (i = 0; i < IGB_RETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 reg = E1000_RETA(0);
u32 shift = 0;
int i = 0;
switch (hw->mac.type) {
case e1000_82575:
shift = 6;
break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
if (adapter->vfs_allocated_count)
shift = 3;
break;
default:
break;
}
while (i < IGB_RETA_SIZE) {
u32 val = 0;
int j;
for (j = 3; j >= 0; j--) {
val <<= 8;
val |= adapter->rss_indir_tbl[i + j];
}
wr32(reg, val << shift);
reg += 4;
i += 4;
}
}
static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int i;
u32 num_queues;
num_queues = adapter->rss_queues;
switch (hw->mac.type) {
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
if (adapter->vfs_allocated_count)
num_queues = 2;
break;
default:
break;
}
/* Verify user input. */
for (i = 0; i < IGB_RETA_SIZE; i++)
if (indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGB_RETA_SIZE; i++)
adapter->rss_indir_tbl[i] = indir[i];
igb_write_rss_indir_tbl(adapter);
return 0;
}
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
......@@ -2804,6 +2901,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh_indir = igb_get_rxfh_indir,
.set_rxfh_indir = igb_set_rxfh_indir,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
......
......@@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
......@@ -1013,7 +1015,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
/* ixgbe_get_stats64() might access the rings on this vector,
/* igb_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
......@@ -1929,12 +1931,17 @@ void igb_set_fw_version(struct igb_adapter *adapter)
igb_get_fw_version(hw, &fw);
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
snprintf(adapter->fw_version, sizeof(adapter->fw_version),
if (!(igb_get_flash_presence_i210(hw))) {
snprintf(adapter->fw_version,
sizeof(adapter->fw_version),
"%2d.%2d-%d",
fw.invm_major, fw.invm_minor, fw.invm_img_type);
fw.invm_major, fw.invm_minor,
fw.invm_img_type);
break;
}
/* fall through */
default:
/* if option is rom valid, display its version too */
if (fw.or_valid) {
......@@ -1944,11 +1951,16 @@ void igb_set_fw_version(struct igb_adapter *adapter)
fw.eep_major, fw.eep_minor, fw.etrack_id,
fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else {
} else if (fw.etrack_id != 0X0000) {
snprintf(adapter->fw_version,
sizeof(adapter->fw_version),
"%d.%d, 0x%08x",
fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
snprintf(adapter->fw_version,
sizeof(adapter->fw_version),
"%d.%d.%d",
fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
......@@ -2166,15 +2178,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
hw->mac.ops.reset_hw(hw);
/* make sure the NVM is good , i211 parts have special NVM that
* doesn't contain a checksum
/* make sure the NVM is good , i211/i210 parts can have special NVM
* that doesn't contain a checksum
*/
if (hw->mac.type != e1000_i211) {
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
if (igb_get_flash_presence_i210(hw)) {
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev,
"The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
}
break;
default:
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
break;
}
/* copy the MAC address out of the NVM */
......@@ -2436,6 +2461,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
if (!adapter->msix_entries) {
err = -EPERM;
goto out;
}
if (!num_vfs)
goto out;
else if (old_vfs && old_vfs == num_vfs)
......@@ -3096,7 +3126,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
u32 j, num_rx_queues, shift = 0;
u32 j, num_rx_queues;
static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
0xA32DCB77, 0x0CF23080, 0x3BB7426A,
......@@ -3109,35 +3139,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
num_rx_queues = adapter->rss_queues;
switch (hw->mac.type) {
case e1000_82575:
shift = 6;
break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
if (adapter->vfs_allocated_count) {
shift = 3;
if (adapter->vfs_allocated_count)
num_rx_queues = 2;
}
break;
default:
break;
}
/* Populate the indirection table 4 entries at a time. To do this
* we are generating the results for n and n+2 and then interleaving
* those with the results with n+1 and n+3.
*/
for (j = 0; j < 32; j++) {
/* first pass generates n and n+2 */
u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
u32 reta = (base & 0x07800780) >> (7 - shift);
/* second pass generates n+1 and n+3 */
base += 0x00010001 * num_rx_queues;
reta |= (base & 0x07800780) << (1 + shift);
wr32(E1000_RETA(j), reta);
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
/* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
......@@ -3844,7 +3860,6 @@ bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
s32 ret_val = 0;
/* get_link_status is set on LSC (link status) interrupt or
* rx sequence error interrupt. get_link_status will stay
......@@ -3853,16 +3868,11 @@ bool igb_has_link(struct igb_adapter *adapter)
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
if (hw->mac.get_link_status) {
ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
} else {
link_active = true;
}
break;
if (!hw->mac.get_link_status)
return true;
case e1000_media_type_internal_serdes:
ret_val = hw->mac.ops.check_for_link(hw);
link_active = hw->mac.serdes_has_link;
hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
break;
default:
case e1000_media_type_unknown:
......@@ -4814,6 +4824,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
......@@ -4865,6 +4879,8 @@ void igb_update_stats(struct igb_adapter *adapter,
bytes = 0;
packets = 0;
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
......@@ -4900,6 +4916,7 @@ void igb_update_stats(struct igb_adapter *adapter,
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
rcu_read_unlock();
/* read stats registers */
adapter->stats.crcerrs += rd32(E1000_CRCERRS);
......
......@@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
u32 lo, hi;
u64 val;
u32 lo, hi, jk;
/* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
jk = rd32(E1000_SYSTIMR);
rd32(E1000_SYSTIMR);
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
......@@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
u32 sec, nsec, jk;
u32 sec, nsec;
/* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
jk = rd32(E1000_SYSTIMR);
rd32(E1000_SYSTIMR);
nsec = rd32(E1000_SYSTIML);
sec = rd32(E1000_SYSTIMH);
......