Commit 6a8fab17 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-07-26

This series contains updates to ixgbe and igb.

Tony fixes ixgbe by adding checks to ensure that jumbo frames or LRO do not
get enabled while an XDP program is loaded, since neither is supported with
XDP.
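
To illustrate the kind of guard involved, here is a minimal standalone C
sketch that rejects a jumbo MTU or LRO while XDP is attached; validate_config(),
struct dev_state and MAX_STD_FRAME_MTU are invented for this example and are
not the actual ixgbe code:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_STD_FRAME_MTU 1500   /* assumption for the sketch */

    struct dev_state {
        bool xdp_attached;
        bool lro_requested;
        int  new_mtu;
    };

    /* Return 0 when the requested change is allowed, -1 (think -EINVAL)
     * when it would conflict with an attached XDP program.
     */
    static int validate_config(const struct dev_state *st)
    {
        if (st->xdp_attached && st->new_mtu > MAX_STD_FRAME_MTU) {
            fprintf(stderr, "jumbo frames are not supported with XDP\n");
            return -1;
        }
        if (st->xdp_attached && st->lro_requested) {
            fprintf(stderr, "LRO is not supported with XDP\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* A jumbo MTU request while XDP is attached gets rejected. */
        struct dev_state st = { .xdp_attached = true, .new_mtu = 9000 };

        return validate_config(&st) ? 1 : 0;
    }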

Shannon Nelson adds the missing security configuration registers to the
ixgbe register dump, which will help in debugging.

Christian Grönke fixes an issue in igb that occurs on SGMII-based SFP
modules by reverting the changes from two previous patches.  The issue was
that initialization would fail on the aforementioned modules because the
driver would try to reset the PHY before obtaining the PHY address of the
SGMII-attached PHY.

Venkatesh Srinivas replaces wmb() with dma_wmb() for doorbell writes,
which avoids SFENCEs before the doorbell writes.
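
For context, the doorbell pattern in question looks roughly like the
kernel-style sketch below; toy_ring, toy_desc and toy_post_desc are invented
names, endianness handling is omitted, and the fragment only builds against
kernel headers, so treat it as an illustration rather than the igb code:

    #include <linux/io.h>
    #include <asm/barrier.h>

    struct toy_desc {
        u64 buf_addr;
        u32 len;
        u32 cmd;
    };

    struct toy_ring {
        struct toy_desc *desc;      /* descriptors in coherent DMA memory */
        void __iomem *tail;         /* MMIO doorbell register */
        u16 count;
        u16 next_to_use;
    };

    static void toy_post_desc(struct toy_ring *ring, u64 dma, u32 len)
    {
        struct toy_desc *d = &ring->desc[ring->next_to_use];

        /* Fill the descriptor the device will fetch. */
        d->buf_addr = dma;
        d->len = len;
        d->cmd = 1;                 /* e.g. an "end of packet" style flag */

        if (++ring->next_to_use == ring->count)
            ring->next_to_use = 0;

        /* dma_wmb() orders the descriptor stores in coherent memory ahead
         * of the doorbell write below; on x86 it is lighter than wmb(),
         * which would emit an SFENCE.
         */
        dma_wmb();

        /* Ring the doorbell: tell the NIC a new descriptor is ready. */
        writel(ring->next_to_use, ring->tail);
    }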

Alex cleans up and refactors the ixgbe Tx/Rx shutdown paths to reduce the
time needed to stop the device.  The refactor also allows the queue-disable
logic to take completion time into account, so that platforms with higher
completion times no longer log messages about receive queues failing to
disable.
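
As a rough illustration of "disable a queue and wait, budgeting for
completion time", here is a small standalone C sketch; disable_queue(), the
simulated registers and the timing constants are made up for the example and
do not mirror the actual ixgbe implementation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define QUEUE_ENABLE   (1u << 25)  /* enable bit in the queue control reg */
    #define POLL_STEP_US   10
    #define WAIT_BUDGET_US 2000        /* scaled up for slow-completion platforms */

    /* Simulated queue-control "registers" so the sketch runs standalone;
     * this toy hardware clears the bit as soon as it is written.
     */
    static uint32_t queue_ctrl[8] = { [0] = QUEUE_ENABLE };

    static uint32_t read_queue_ctrl(int queue)              { return queue_ctrl[queue]; }
    static void write_queue_ctrl(int queue, uint32_t val)   { queue_ctrl[queue] = val; }

    static bool disable_queue(int queue)
    {
        int waited = 0;

        /* Request the queue to stop. */
        write_queue_ctrl(queue, read_queue_ctrl(queue) & ~QUEUE_ENABLE);

        /* Poll until the hardware reports the queue stopped, giving it a
         * wait budget sized for the platform's completion latency instead
         * of assuming it stops instantly.
         */
        while (read_queue_ctrl(queue) & QUEUE_ENABLE) {
            if (waited >= WAIT_BUDGET_US) {
                fprintf(stderr, "queue %d failed to disable\n", queue);
                return false;
            }
            usleep(POLL_STEP_US);
            waited += POLL_STEP_US;
        }
        return true;
    }

    int main(void)
    {
        return disable_queue(0) ? 0 : 1;
    }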
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c921d7db 1918e937
@@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
         hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
                         E1000_STATUS_FUNC_SHIFT;
 
-        /* Make sure the PHY is in a good state. Several people have reported
-         * firmware leaving the PHY's page select register set to something
-         * other than the default of zero, which causes the PHY ID read to
-         * access something other than the intended register.
-         */
-        ret_val = hw->phy.ops.reset(hw);
-        if (ret_val) {
-                hw_dbg("Error resetting the PHY.\n");
-                goto out;
-        }
-
         /* Set phy->phy_addr and phy->id. */
-        igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
         ret_val = igb_get_phy_id_82575(hw);
         if (ret_val)
                 return ret_val;
...
@@ -6031,7 +6031,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
         * We also need this memory barrier to make certain all of the
         * status bits have been updated before next_to_watch is written.
         */
-       wmb();
+       dma_wmb();
 
        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;
@@ -8531,7 +8531,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
-               wmb();
+               dma_wmb();
                writel(i, rx_ring->tail);
        }
 }
...
@@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *);
 void ixgbe_free_tx_resources(struct ixgbe_ring *);
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
 void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
...
@@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 
 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN  1139
+#define IXGBE_REGS_LEN  1145
        return IXGBE_REGS_LEN * sizeof(u32);
 }
 
@@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
        /* X540 specific DCB registers */
        regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
        regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
+
+       /* Security config registers */
+       regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+       regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+       regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+       regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+       regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 
 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
-       struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 reg_ctl;
-
-       /* shut down the DMA engines now so they can be reinitialized later */
+       /* Shut down the DMA engines now so they can be reinitialized later,
+        * since the test rings and normally used rings should overlap on
+        * queue 0 we can just use the standard disable Rx/Tx calls and they
+        * will take care of disabling the test rings for us.
+        */
 
        /* first Rx */
-       hw->mac.ops.disable_rx(hw);
-       ixgbe_disable_rx_queue(adapter, rx_ring);
+       ixgbe_disable_rx(adapter);
 
        /* now Tx */
-       reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
-       reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
-       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
-
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
-       case ixgbe_mac_x550em_a:
-               reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-               reg_ctl &= ~IXGBE_DMATXCTL_TE;
-               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
-               break;
-       default:
-               break;
-       }
+       ixgbe_disable_tx(adapter);
 
        ixgbe_reset(adapter);
 
...