Commit 0bcf6aa7 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ixgbevf and e1000e.

Alex's ixgbevf patch addresses several races that become possible because
next_to_watch could be set to a value indicating that the descriptor is
done when it is not. To correct that, we instead make next_to_watch a
pointer that is set to NULL during cleanup and set to the eop_desc only
after the descriptor ring entries have been written. A minimal sketch of
this pattern follows the commit metadata below.

Stephen's ixgbevf patch makes the PCI id table a const and reformats the
table to match what the ixgbe driver does.

The remaining 13 patches from Bruce are cleanup patches for e1000e that
resolve checkpatch.pl warnings/errors, remove blank lines where
necessary, and fix code formatting.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c031e234 3ffcf2cb
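
A minimal, hedged sketch of the pointer-based next_to_watch pattern described
above. The names and types here (tx_desc, tx_buffer, tx_publish, tx_clean_one,
TX_STAT_DD) are simplified placeholders, not the real driver structures (the
driver uses union ixgbe_adv_tx_desc and struct ixgbevf_tx_buffer); only the
publish/consume ordering around the descriptor writes is illustrated, assuming
kernel-style types and the kernel wmb() barrier.

/* Sketch only -- simplified stand-ins for the ixgbevf structures. */
struct tx_desc {
	u32 wb_status;				/* hardware sets a DD bit here */
};
#define TX_STAT_DD	0x1

struct tx_buffer {
	struct tx_desc *next_to_watch;		/* NULL means nothing pending */
};

/* Transmit side: write every descriptor ring entry first, then publish the
 * watch pointer, so cleanup can never see a "done" indication for a
 * descriptor that has not been fully written yet.
 */
static void tx_publish(struct tx_buffer *buf, struct tx_desc *eop_desc)
{
	/* ... descriptor ring entries are written here ... */

	wmb();		/* order the ring writes before the pointer store */
	buf->next_to_watch = eop_desc;
}

/* Cleanup side: look at the descriptor only via the published pointer. */
static bool tx_clean_one(struct tx_buffer *buf)
{
	struct tx_desc *eop_desc = buf->next_to_watch;

	if (!eop_desc)				/* no work pending */
		return false;
	if (!(eop_desc->wb_status & TX_STAT_DD))
		return false;			/* hardware not done yet */

	buf->next_to_watch = NULL;		/* clear to prevent false hangs */
	/* ... unmap buffers and free the skb here ... */
	return true;
}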
@@ -37,7 +37,9 @@
  * "index + 5".
  */
 static const u16 e1000_gg82563_cable_length_table[] = {
-0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
+};
 #define GG82563_CABLE_LENGTH_TABLE_SIZE \
 ARRAY_SIZE(e1000_gg82563_cable_length_table)
@@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
  * before the device has completed the "Page Select" MDI
  * transaction. So we wait 200us after each MDI command...
  */
-udelay(200);
+usleep_range(200, 400);
 /* ...and verify the command was successful. */
 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -403,13 +405,13 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 return -E1000_ERR_PHY;
 }
-udelay(200);
+usleep_range(200, 400);
 ret_val = e1000e_read_phy_reg_mdic(hw,
 MAX_PHY_REG_ADDRESS & offset,
 data);
-udelay(200);
+usleep_range(200, 400);
 } else {
 ret_val = e1000e_read_phy_reg_mdic(hw,
 MAX_PHY_REG_ADDRESS & offset,
@@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
  * before the device has completed the "Page Select" MDI
  * transaction. So we wait 200us after each MDI command...
  */
-udelay(200);
+usleep_range(200, 400);
 /* ...and verify the command was successful. */
 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 return -E1000_ERR_PHY;
 }
-udelay(200);
+usleep_range(200, 400);
 ret_val = e1000e_write_phy_reg_mdic(hw,
-MAX_PHY_REG_ADDRESS & offset,
-data);
+MAX_PHY_REG_ADDRESS &
+offset, data);
-udelay(200);
+usleep_range(200, 400);
 } else {
 ret_val = e1000e_write_phy_reg_mdic(hw,
-MAX_PHY_REG_ADDRESS & offset,
-data);
+MAX_PHY_REG_ADDRESS &
+offset, data);
 }
 e1000_release_phy_80003es2lan(hw);
@@ -666,9 +668,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 s32 ret_val;
 if (hw->phy.media_type == e1000_media_type_copper) {
-ret_val = e1000e_get_speed_and_duplex_copper(hw,
-speed,
-duplex);
+ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
 hw->phy.ops.cfg_on_link_up(hw);
 } else {
 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
@@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 /* Initialize identification LED */
 ret_val = mac->ops.id_led_init(hw);
+/* An error is not fatal and we should not stop init due to this */
 if (ret_val)
 e_dbg("Error initializing identification LED\n");
-/* This is not fatal and we should not stop init due to this */
 /* Disabling VLAN filtering */
 e_dbg("Initializing the IEEE VLAN\n");
@@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 /* Set the transmit descriptor write-back policy */
 reg_data = er32(TXDCTL(0));
-reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
-E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
 ew32(TXDCTL(0), reg_data);
 /* ...for both queues. */
 reg_data = er32(TXDCTL(1));
-reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
-E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
 ew32(TXDCTL(1), reg_data);
 /* Enable retransmit on late collisions */
@@ -818,10 +818,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 /* default to true to enable the MDIC W/A */
 hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
-ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
-E1000_KMRNCTRLSTA_OFFSET >>
-E1000_KMRNCTRLSTA_OFFSET_SHIFT,
-&i);
+ret_val =
+e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
 if (!ret_val) {
 if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
 E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
@@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 {
 struct e1000_phy_info *phy = &hw->phy;
 s32 ret_val;
-u32 ctrl_ext;
+u32 reg;
 u16 data;
 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
@@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 }
 /* Bypass Rx and Tx FIFO's */
-ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
-E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
-E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
-E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
 if (ret_val)
 return ret_val;
-ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
-E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
-&data);
+reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
 if (ret_val)
 return ret_val;
 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
-ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
-E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
-data);
+ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
 if (ret_val)
 return ret_val;
@@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 if (ret_val)
 return ret_val;
-ctrl_ext = er32(CTRL_EXT);
-ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
-ew32(CTRL_EXT, ctrl_ext);
+reg = er32(CTRL_EXT);
+reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ew32(CTRL_EXT, reg);
 ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
 if (ret_val)
@@ -1061,13 +1057,15 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
 reg_data);
 if (ret_val)
 return ret_val;
-ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ret_val =
+e1000_read_kmrn_reg_80003es2lan(hw,
 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
 &reg_data);
 if (ret_val)
 return ret_val;
 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
-ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ret_val =
+e1000_write_kmrn_reg_80003es2lan(hw,
 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
 reg_data);
 if (ret_val)
@@ -1125,7 +1123,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
 u16 reg_data, reg_data2;
 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
-ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ret_val =
+e1000_write_kmrn_reg_80003es2lan(hw,
 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
 reg_data);
 if (ret_val)
@@ -1171,7 +1170,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
 u32 i = 0;
 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
-ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ret_val =
+e1000_write_kmrn_reg_80003es2lan(hw,
 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
 reg_data);
 if (ret_val)
@@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
 .phy_ops = &es2_phy_ops,
 .nvm_ops = &es2_nvm_ops,
 };
@@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
 return ret_val;
 phy->id = (u32)(phy_id << 16);
-udelay(20);
+usleep_range(20, 40);
 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
 if (ret_val)
 return ret_val;
@@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 if (!(swsm & E1000_SWSM_SMBI))
 break;
-udelay(50);
+usleep_range(50, 100);
 i++;
 }
@@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 if (er32(SWSM) & E1000_SWSM_SWESMBI)
 break;
-udelay(50);
+usleep_range(50, 100);
 }
 if (i == fw_timeout) {
@@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
 ew32(SWSM, swsm);
 }
 /**
 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
@@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 }
 for (i = 0; i < words; i++) {
-eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
-((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
-E1000_NVM_RW_REG_START;
+eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+E1000_NVM_RW_REG_START);
 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
 if (ret_val)
@@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 s32 timeout = PHY_CFG_TIMEOUT;
 while (timeout) {
-if (er32(EEMNGCTL) &
-E1000_NVM_CFG_DONE_PORT_0)
+if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
 break;
 usleep_range(1000, 2000);
 timeout--;
@@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 }
 if (hw->nvm.type == e1000_nvm_flash_hw) {
-udelay(10);
+usleep_range(10, 20);
 ctrl_ext = er32(CTRL_EXT);
 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
 ew32(CTRL_EXT, ctrl_ext);
@@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 /* Initialize identification LED */
 ret_val = mac->ops.id_led_init(hw);
+/* An error is not fatal and we should not stop init due to this */
 if (ret_val)
 e_dbg("Error initializing identification LED\n");
-/* This is not fatal and we should not stop init due to this */
 /* Disabling VLAN filtering */
 e_dbg("Initializing the IEEE VLAN\n");
@@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 /* Set the transmit descriptor write-back policy */
 reg_data = er32(TXDCTL(0));
-reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
-E1000_TXDCTL_FULL_TX_DESC_WB |
-E1000_TXDCTL_COUNT_DESC;
+reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
 ew32(TXDCTL(0), reg_data);
 /* ...for both queues. */
@@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 break;
 default:
 reg_data = er32(TXDCTL(1));
-reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
 E1000_TXDCTL_FULL_TX_DESC_WB |
-E1000_TXDCTL_COUNT_DESC;
+E1000_TXDCTL_COUNT_DESC);
 ew32(TXDCTL(1), reg_data);
 break;
 }
@@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 status = er32(STATUS);
 er32(RXCW);
 /* SYNCH bit and IV bit are sticky */
-udelay(10);
+usleep_range(10, 20);
 rxcw = er32(RXCW);
 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 * the IV bit and restart Autoneg
 */
 for (i = 0; i < AN_RETRY_COUNT; i++) {
-udelay(10);
+usleep_range(10, 20);
 rxcw = er32(RXCW);
 if ((rxcw & E1000_RXCW_SYNCH) &&
 (rxcw & E1000_RXCW_C))
@@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
 .phy_ops = &e82_phy_ops_bm,
 .nvm_ops = &e82571_nvm_ops,
 };
@@ -44,6 +44,8 @@
 #define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
 #define E1000_EIAC_MASK_82574 0x01F00000
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
 /* Manageability Operation Mode mask */
 #define E1000_NVM_INIT_CTRL2_MNGM 0x6000
...
@@ -66,7 +66,7 @@
 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
 #define E1000_CTRL_EXT_EIAME 0x01000000
 #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
 #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
 #define E1000_CTRL_EXT_LSECCK 0x00001000
 #define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -216,6 +216,8 @@
 #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
 #define E1000_CTRL_RST 0x04000000 /* Global reset */
 #define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
@@ -239,12 +241,11 @@
 #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
 #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
 #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
 #define HALF_DUPLEX 1
 #define FULL_DUPLEX 2
 #define ADVERTISE_10_HALF 0x0001
 #define ADVERTISE_10_FULL 0x0002
 #define ADVERTISE_100_HALF 0x0004
@@ -311,6 +312,7 @@
 /* SerDes Control */
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
 /* Receive Checksum Control */
 #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
@@ -400,7 +402,8 @@
 #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
 #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
@@ -583,12 +586,12 @@
 #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
 #define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
 #define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
 #define E1000_NVM_RW_REG_START 1 /* Start operation */
 #define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
 #define E1000_FLASH_UPDATES 2000
 /* NVM Word Offsets */
...
@@ -61,7 +61,6 @@ struct e1000_info;
 #define e_notice(format, arg...) \
 netdev_notice(adapter->netdev, format, ## arg)
 /* Interrupt modes, as used by the IntMode parameter */
 #define E1000E_INT_MODE_LEGACY 0
 #define E1000E_INT_MODE_MSI 1
@@ -239,9 +238,8 @@ struct e1000_adapter {
 u16 tx_itr;
 u16 rx_itr;
-/* Tx */
-struct e1000_ring *tx_ring /* One per active queue */
-____cacheline_aligned_in_smp;
+/* Tx - one ring per active queue */
+struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
 u32 tx_fifo_limit;
 struct napi_struct napi;
@@ -558,12 +556,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
 return hw->nvm.ops.update(hw);
 }
-static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+u16 *data)
 {
 return hw->nvm.ops.read(hw, offset, words, data);
 }
-static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+u16 *data)
 {
 return hw->nvm.ops.write(hw, offset, words, data);
 }
@@ -597,7 +597,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
-udelay(50);
+usleep_range(50, 100);
 return i;
 }
...
This diff is collapsed.
@@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
 e1000_1000t_rx_status_undefined = 0xFF
 };
-enum e1000_rev_polarity{
+enum e1000_rev_polarity {
 e1000_rev_polarity_normal = 0,
 e1000_rev_polarity_reversed,
 e1000_rev_polarity_undefined = 0xFF
@@ -545,7 +545,7 @@ struct e1000_mac_info {
 u16 mta_reg_count;
 /* Maximum size of the MTA register table in all supported adapters */
 #define MAX_MTA_REG 128
 u32 mta_shadow[MAX_MTA_REG];
 u16 rar_entry_count;
...
This diff is collapsed.
@@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 * serdes media type.
 */
 /* SYNCH bit and IV bit are sticky. */
-udelay(10);
+usleep_range(10, 20);
 rxcw = er32(RXCW);
 if (rxcw & E1000_RXCW_SYNCH) {
 if (!(rxcw & E1000_RXCW_IV)) {
@@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 status = er32(STATUS);
 if (status & E1000_STATUS_LU) {
 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
-udelay(10);
+usleep_range(10, 20);
 rxcw = er32(RXCW);
 if (rxcw & E1000_RXCW_SYNCH) {
 if (!(rxcw & E1000_RXCW_IV)) {
@@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
 if (!(swsm & E1000_SWSM_SMBI))
 break;
-udelay(50);
+usleep_range(50, 100);
 i++;
 }
@@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
 if (er32(SWSM) & E1000_SWSM_SWESMBI)
 break;
-udelay(50);
+usleep_range(50, 100);
 }
 if (i == timeout) {
@@ -1712,7 +1712,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
 while (timeout) {
 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
 break;
-udelay(100);
+usleep_range(100, 200);
 timeout--;
 }
...
This diff is collapsed.
@@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
 {
 u32 ctrl_ext;
-udelay(10);
+usleep_range(10, 20);
 ctrl_ext = er32(CTRL_EXT);
 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
 ew32(CTRL_EXT, ctrl_ext);
...
@@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
 *
 * Default Value: 1 (enabled)
 */
-E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+E1000_PARAM(WriteProtectNVM,
+"Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
 /* Enable CRC Stripping
 *
@@ -160,13 +161,18 @@ struct e1000_option {
 const char *err;
 int def;
 union {
-struct { /* range_option info */
+/* range_option info */
+struct {
 int min;
 int max;
 } r;
-struct { /* list_option info */
+/* list_option info */
+struct {
 int nr;
-struct e1000_opt_list { int i; char *str; } *p;
+struct e1000_opt_list {
+int i;
+char *str;
+} *p;
 } l;
 } arg;
 };
@@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 "Using defaults for all values\n");
 }
-{ /* Transmit Interrupt Delay */
+/* Transmit Interrupt Delay */
+{
 static const struct e1000_option opt = {
 .type = range_option,
 .name = "Transmit Interrupt Delay",
@@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 adapter->tx_int_delay = opt.def;
 }
 }
-{ /* Transmit Absolute Interrupt Delay */
+/* Transmit Absolute Interrupt Delay */
+{
 static const struct e1000_option opt = {
 .type = range_option,
 .name = "Transmit Absolute Interrupt Delay",
@@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 adapter->tx_abs_int_delay = opt.def;
 }
 }
-{ /* Receive Interrupt Delay */
+/* Receive Interrupt Delay */
+{
 static struct e1000_option opt = {
 .type = range_option,
 .name = "Receive Interrupt Delay",
@@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 adapter->rx_int_delay = opt.def;
 }
 }
-{ /* Receive Absolute Interrupt Delay */
+/* Receive Absolute Interrupt Delay */
+{
 static const struct e1000_option opt = {
 .type = range_option,
 .name = "Receive Absolute Interrupt Delay",
@@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 adapter->rx_abs_int_delay = opt.def;
 }
 }
-{ /* Interrupt Throttling Rate */
+/* Interrupt Throttling Rate */
+{
 static const struct e1000_option opt = {
 .type = range_option,
 .name = "Interrupt Throttling Rate (ints/sec)",
@@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 break;
 }
 }
-{ /* Interrupt Mode */
+/* Interrupt Mode */
+{
 static struct e1000_option opt = {
 .type = range_option,
 .name = "Interrupt Mode",
@@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 kfree(opt.err);
 #endif
 }
-{ /* Smart Power Down */
+/* Smart Power Down */
+{
 static const struct e1000_option opt = {
 .type = enable_option,
 .name = "PHY Smart Power Down",
@@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 adapter->flags |= FLAG_SMART_POWER_DOWN;
 }
 }
-{ /* CRC Stripping */
+/* CRC Stripping */
+{
 static const struct e1000_option opt = {
 .type = enable_option,
 .name = "CRC Stripping",
@@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
 }
 }
-{ /* Kumeran Lock Loss Workaround */
+/* Kumeran Lock Loss Workaround */
+{
 static const struct e1000_option opt = {
 .type = enable_option,
 .name = "Kumeran Lock Loss Workaround",
 .err = "defaulting to Enabled",
 .def = OPTION_ENABLED
 };
+bool enabled = opt.def;
 if (num_KumeranLockLoss > bd) {
 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+enabled = kmrn_lock_loss;
+}
 if (hw->mac.type == e1000_ich8lan)
 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
-kmrn_lock_loss);
-} else {
-if (hw->mac.type == e1000_ich8lan)
-e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
-opt.def);
-}
+enabled);
 }
-{ /* Write-protect NVM */
+/* Write-protect NVM */
+{
 static const struct e1000_option opt = {
 .type = enable_option,
 .name = "Write-protect NVM",
@@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 if (adapter->flags & FLAG_IS_ICH) {
 if (num_WriteProtectNVM > bd) {
-unsigned int write_protect_nvm = WriteProtectNVM[bd];
+unsigned int write_protect_nvm =
+WriteProtectNVM[bd];
 e1000_validate_option(&write_protect_nvm, &opt,
 adapter);
 if (write_protect_nvm)
...
@@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] = {
-0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
+};
 #define M88E1000_CABLE_LENGTH_TABLE_SIZE \
 ARRAY_SIZE(e1000_m88_cable_length_table)
@@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
-124};
+124
+};
 #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
 ARRAY_SIZE(e1000_igp_2_cable_length_table)
@@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
 manc = er32(MANC);
-return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
-E1000_BLK_PHY_RESET : 0;
+return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
 }
 /**
@@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
 return ret_val;
 phy->id = (u32)(phy_id << 16);
-udelay(20);
+usleep_range(20, 40);
 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
 if (ret_val)
 return ret_val;
@@ -162,7 +165,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 * the lower time out
 */
 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-udelay(50);
+usleep_range(50, 100);
 mdic = er32(MDIC);
 if (mdic & E1000_MDIC_READY)
 break;
@@ -175,13 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 e_dbg("MDI Error\n");
 return -E1000_ERR_PHY;
 }
-*data = (u16) mdic;
+*data = (u16)mdic;
 /* Allow some time after each MDIC transaction to avoid
 * reading duplicate data in the next MDIC transaction.
 */
 if (hw->mac.type == e1000_pch2lan)
-udelay(100);
+usleep_range(100, 200);
 return 0;
 }
@@ -220,7 +223,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 * the lower time out
 */
 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-udelay(50);
+usleep_range(50, 100);
 mdic = er32(MDIC);
 if (mdic & E1000_MDIC_READY)
 break;
@@ -238,7 +241,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 * reading duplicate data in the next MDIC transaction.
 */
 if (hw->mac.type == e1000_pch2lan)
-udelay(100);
+usleep_range(100, 200);
 return 0;
 }
@@ -410,8 +413,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
 (u16)offset);
 if (!ret_val)
 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
-offset,
-data);
+offset, data);
 if (!locked)
 hw->phy.ops.release(hw);
@@ -772,8 +774,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 phy_data |= M88E1000_EPSCR_TX_CLK_25;
-if ((phy->revision == 2) &&
-(phy->id == M88E1111_I_PHY_ID)) {
+if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
 /* 82573L PHY - set the downshift counter to 5x. */
 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
@@ -1609,9 +1610,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
 if (!ret_val)
-phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
 ? e1000_rev_polarity_reversed
-: e1000_rev_polarity_normal;
+: e1000_rev_polarity_normal);
 return ret_val;
 }
@@ -1653,9 +1654,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 ret_val = e1e_rphy(hw, offset, &data);
 if (!ret_val)
-phy->cable_polarity = (data & mask)
+phy->cable_polarity = ((data & mask)
 ? e1000_rev_polarity_reversed
-: e1000_rev_polarity_normal;
+: e1000_rev_polarity_normal);
 return ret_val;
 }
@@ -1685,9 +1686,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
 ret_val = e1e_rphy(hw, offset, &phy_data);
 if (!ret_val)
-phy->cable_polarity = (phy_data & mask)
+phy->cable_polarity = ((phy_data & mask)
 ? e1000_rev_polarity_reversed
-: e1000_rev_polarity_normal;
+: e1000_rev_polarity_normal);
 return ret_val;
 }
@@ -1756,7 +1757,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 if (phy_status & BMSR_LSTATUS)
 break;
 if (usec_interval >= 1000)
-mdelay(usec_interval/1000);
+mdelay(usec_interval / 1000);
 else
 udelay(usec_interval);
 }
@@ -1791,8 +1792,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
 if (ret_val)
 return ret_val;
-index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+M88E1000_PSSR_CABLE_LENGTH_SHIFT);
 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
 return -E1000_ERR_PHY;
@@ -1841,8 +1842,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
 * that can be put into the lookup table to obtain the
 * approximate cable length.
 */
-cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
-IGP02E1000_AGC_LENGTH_MASK;
+cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+IGP02E1000_AGC_LENGTH_MASK);
 /* Array index bound check. */
 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@@ -1865,8 +1866,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
 /* Calculate cable length with the error range of +/- 10 meters. */
-phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
-(agc_value - IGP02E1000_AGC_RANGE) : 0;
+phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+(agc_value - IGP02E1000_AGC_RANGE) : 0);
 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -2040,9 +2041,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
 return ret_val;
 } else {
 /* Polarity is forced */
-phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
 ? e1000_rev_polarity_reversed
-: e1000_rev_polarity_normal;
+: e1000_rev_polarity_normal);
 }
 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@@ -2119,7 +2120,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
 ew32(CTRL, ctrl);
 e1e_flush();
-udelay(150);
+usleep_range(150, 300);
 phy->ops.release(hw);
@@ -2786,8 +2787,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
 page << IGP_PAGE_SHIFT, reg);
-ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
-data);
+ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
 out:
 if (!locked)
 hw->phy.ops.release(hw);
@@ -2995,8 +2995,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
 u32 data_reg;
 /* This takes care of the difference with desktop vs mobile phy */
-addr_reg = (hw->phy.type == e1000_phy_82578) ?
-I82578_ADDR_REG : I82577_ADDR_REG;
+addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+I82578_ADDR_REG : I82577_ADDR_REG);
 data_reg = addr_reg + 1;
 /* All operations in this function are phy address 2 */
@@ -3050,8 +3050,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 if (ret_val)
 return ret_val;
-data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
-BM_CS_STATUS_SPEED_MASK;
+data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+BM_CS_STATUS_SPEED_MASK);
 if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
 BM_CS_STATUS_SPEED_1000))
@@ -3086,9 +3086,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 if (!ret_val)
-phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
 ? e1000_rev_polarity_reversed
-: e1000_rev_polarity_normal;
+: e1000_rev_polarity_normal);
 return ret_val;
 }
@@ -3215,8 +3215,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
 if (ret_val)
 return ret_val;
-length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
-I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+I82577_DSTATUS_CABLE_LENGTH_SHIFT);
 if (length == E1000_CABLE_LENGTH_UNDEFINED)
 return -E1000_ERR_PHY;
...
@@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
 struct sk_buff *skb;
 dma_addr_t dma;
 unsigned long time_stamp;
+union ixgbe_adv_tx_desc *next_to_watch;
 u16 length;
-u16 next_to_watch;
 u16 mapped_as_page;
 };
...
@@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 * Class, Class Mask, private data (not used) }
 */
-static struct pci_device_id ixgbevf_pci_tbl[] = {
-{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
-board_82599_vf},
-{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
-board_X540_vf},
+static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
+{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
 /* required last entry */
 {0, }
 };
@@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 struct ixgbevf_adapter *adapter = q_vector->adapter;
 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
 struct ixgbevf_tx_buffer *tx_buffer_info;
-unsigned int i, eop, count = 0;
+unsigned int i, count = 0;
 unsigned int total_bytes = 0, total_packets = 0;
 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
 return true;
 i = tx_ring->next_to_clean;
-eop = tx_ring->tx_buffer_info[i].next_to_watch;
-eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
-while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-(count < tx_ring->count)) {
+tx_buffer_info = &tx_ring->tx_buffer_info[i];
+eop_desc = tx_buffer_info->next_to_watch;
+do {
 bool cleaned = false;
-rmb(); /* read buffer_info after eop_desc */
-/* eop could change between read and DD-check */
-if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
-goto cont_loop;
+/* if next_to_watch is not set then there is no work pending */
+if (!eop_desc)
+break;
+/* prevent any other reads prior to eop_desc */
+read_barrier_depends();
+/* if DD is not set pending work has not been completed */
+if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+break;
+/* clear next_to_watch to prevent false hangs */
+tx_buffer_info->next_to_watch = NULL;
 for ( ; !cleaned; count++) {
 struct sk_buff *skb;
 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-tx_buffer_info = &tx_ring->tx_buffer_info[i];
-cleaned = (i == eop);
+cleaned = (tx_desc == eop_desc);
 skb = tx_buffer_info->skb;
 if (cleaned && skb) {
@@ -234,13 +240,13 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 i++;
 if (i == tx_ring->count)
 i = 0;
-}
-cont_loop:
-eop = tx_ring->tx_buffer_info[i].next_to_watch;
-eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
+tx_buffer_info = &tx_ring->tx_buffer_info[i];
 }
+eop_desc = tx_buffer_info->next_to_watch;
+} while (count < tx_ring->count);
 tx_ring->next_to_clean = i;
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -2806,8 +2812,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 }
 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
-struct sk_buff *skb, u32 tx_flags,
-unsigned int first)
+struct sk_buff *skb, u32 tx_flags)
 {
 struct ixgbevf_tx_buffer *tx_buffer_info;
 unsigned int len;
@@ -2832,7 +2837,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 size, DMA_TO_DEVICE);
 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
 goto dma_error;
-tx_buffer_info->next_to_watch = i;
 len -= size;
 total -= size;
@@ -2862,7 +2866,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 tx_buffer_info->dma))
 goto dma_error;
 tx_buffer_info->mapped_as_page = true;
-tx_buffer_info->next_to_watch = i;
 len -= size;
 total -= size;
@@ -2881,8 +2884,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 else
 i = i - 1;
 tx_ring->tx_buffer_info[i].skb = skb;
-tx_ring->tx_buffer_info[first].next_to_watch = i;
-tx_ring->tx_buffer_info[first].time_stamp = jiffies;
 return count;
@@ -2891,7 +2892,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 /* clear timestamp and dma mappings for failed tx_buffer_info map */
 tx_buffer_info->dma = 0;
-tx_buffer_info->next_to_watch = 0;
 count--;
 /* clear timestamp and dma mappings for remaining portion of packet */
@@ -2908,7 +2908,8 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 }
 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
-int count, u32 paylen, u8 hdr_len)
+int count, unsigned int first, u32 paylen,
+u8 hdr_len)
 {
 union ixgbe_adv_tx_desc *tx_desc = NULL;
 struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -2959,6 +2960,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+/* Force memory writes to complete before letting h/w
+* know there are new descriptors to fetch. (Only
+* applicable for weak-ordered memory model archs,
+* such as IA-64).
+*/
+wmb();
+tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
 tx_ring->next_to_use = i;
 }
@@ -3050,15 +3061,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 tx_flags |= IXGBE_TX_FLAGS_CSUM;
 ixgbevf_tx_queue(tx_ring, tx_flags,
-ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
-skb->len, hdr_len);
-/*
-* Force memory writes to complete before letting h/w
-* know there are new descriptors to fetch. (Only
-* applicable for weak-ordered memory model archs,
-* such as IA-64).
-*/
-wmb();
+ixgbevf_tx_map(tx_ring, skb, tx_flags),
+first, skb->len, hdr_len);
 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
...