Commit 0349a86c authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2017-10-09

This series contains updates to ixgbe only.

Emil fixes an issue where the semaphore bits could be stuck after a reset
or a crash by clearing the software resource bits in the software/firmware
synchronization register.  He also adds error checks when we attempt to
identify and initialize the PHY, to prevent a crash, and fixes a few issues
in the logic of ixgbe_clean_test_rings() which were exposed by a previous
commit and were causing a crash in ethtool diagnostics.

Bhumika Goyal fixes a couple of instances which were overlooked when we
made ixgbe_mac_operations constant.

Shannon Nelson fixes an issue so that normal operation is restored after
the last MACVLAN offload is removed; otherwise we would get stuck in
single queue operation.
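
The general pattern looks something like the sketch below (illustrative
only; the structure and field names are hypothetical and not the driver's
actual code): once the offload count drops to zero, fall back to the
default multi-queue layout instead of staying in the single-queue layout
used while offloads were active.

struct example_adapter {
	unsigned int num_macvlan_offloads;   /* hypothetical field */
	unsigned int num_tx_queues;
	unsigned int default_tx_queues;
};

static void example_macvlan_removed(struct example_adapter *adapter)
{
	if (adapter->num_macvlan_offloads == 0)
		return;

	adapter->num_macvlan_offloads--;

	/* Once the last offload is gone, restore normal queue operation. */
	if (adapter->num_macvlan_offloads == 0)
		adapter->num_tx_queues = adapter->default_tx_queues;
}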

The infamous Jesper Dangaard Brouer adds a counter which counts the
number of times the page recycle fails and the real page allocator is
invoked.
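
In rough terms the accounting looks like the following standalone sketch
(only the alloc_rx_page/alloc_rx_page_failed names come from the patch;
everything else here is hypothetical):

struct example_rx_stats {
	unsigned long long alloc_rx_page;        /* calls into the allocator */
	unsigned long long alloc_rx_page_failed; /* allocator itself failed  */
};

static void *example_get_rx_page(struct example_rx_stats *stats,
				 void *(*try_recycle)(void),
				 void *(*alloc_page)(void))
{
	void *page = try_recycle();

	if (page)
		return page;            /* recycle hit, nothing to count */

	stats->alloc_rx_page++;         /* recycle failed, hit the allocator */
	page = alloc_page();
	if (!page)
		stats->alloc_rx_page_failed++;
	return page;
}

The diff below wires the real counter (alloc_rx_page) into both the ring
statistics and the ethtool strings.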

Alex updates the adaptive ITR algorithm to better support the needs of the
network.  The intent is for the ITR algorithm to prevent either starving a
socket buffer for memory in the case of transmit, or overrunning a receive
socket buffer on receive.  We should also function better with new features
like XDP, which can handle small packets at high rates without needing to
lock us into NAPI polling mode.
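
A much-simplified sketch of how such a value might be picked is shown
below.  The IXGBE_ITR_ADAPTIVE_* constants are the ones added in this
series, but the thresholds and logic here are made up for illustration;
the real algorithm in the driver's ITR update path is considerably more
involved and is driven per ring container via the next_update timestamp
added below.

#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
#define IXGBE_ITR_ADAPTIVE_BULK		0x00

static unsigned int example_pick_itr(unsigned int packets, unsigned int bytes)
{
	/* No traffic since the last interrupt: favor latency. */
	if (!packets)
		return IXGBE_ITR_ADAPTIVE_MIN_USECS | IXGBE_ITR_ADAPTIVE_LATENCY;

	/* Small packets at high rate (e.g. XDP-style workloads): keep the
	 * interrupt delay short so receive buffers are not overrun.
	 */
	if (bytes / packets < 256)
		return IXGBE_ITR_ADAPTIVE_MIN_USECS | IXGBE_ITR_ADAPTIVE_LATENCY;

	/* Bulk transfers: a longer delay so transmit completions arrive in
	 * larger batches without starving the socket buffer of memory.
	 */
	return IXGBE_ITR_ADAPTIVE_MAX_USECS | IXGBE_ITR_ADAPTIVE_BULK;
}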
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2e997d8b b64666ae
@@ -275,6 +275,7 @@ struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
+	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;

@@ -434,8 +435,15 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))

+#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
+#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
+#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
+#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
+#define IXGBE_ITR_ADAPTIVE_BULK		0x00
+
struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
+	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */

@@ -655,6 +663,7 @@ struct ixgbe_adapter {
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
+	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;
...
@@ -3800,10 +3800,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	fw_cmd.hdr.checksum = 0;
-	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
-				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;
+	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));

	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,

@@ -4100,8 +4100,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
		return false;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
-	fwsm &= IXGBE_FWSM_MODE_MASK;
-	return fwsm == IXGBE_FWSM_FW_MODE_PT;
+
+	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}

/**
...
@@ -104,6 +104,7 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
+	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},

@@ -1916,8 +1917,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
			  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
-	struct ixgbe_rx_buffer *rx_buffer;
-	struct ixgbe_tx_buffer *tx_buffer;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */

@@ -1925,7 +1924,38 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

+	while (tx_ntc != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *tx_desc;
+		struct ixgbe_tx_buffer *tx_buffer;
+
+		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
+
+		/* if DD is not set transmit has not completed */
+		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+			return count;
+
+		/* unmap buffer on Tx side */
+		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* increment Tx next to clean counter */
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+	}
+
	while (rx_desc->wb.upper.length) {
+		struct ixgbe_rx_buffer *rx_buffer;
+
		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

@@ -1938,6 +1968,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
+		else
+			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,

@@ -1945,26 +1977,10 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

-		/* unmap buffer on Tx side */
-		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
-
-		/* Free all the Tx ring sk_buffs */
-		dev_kfree_skb_any(tx_buffer->skb);
-
-		/* unmap skb header data */
-		dma_unmap_single(tx_ring->dev,
-				 dma_unmap_addr(tx_buffer, dma),
-				 dma_unmap_len(tx_buffer, len),
-				 DMA_TO_DEVICE);
-		dma_unmap_len_set(tx_buffer, len, 0);
-
-		/* increment Rx/Tx next to clean counters */
+		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
-		tx_ntc++;
-		if (tx_ntc == tx_ring->count)
-			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
...
@@ -806,6 +806,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
+	head->next_update = jiffies + 1;
}

/**

@@ -879,8 +880,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

-	/* initialize pointer to rings */
-	ring = q_vector->ring;
+	/* Initialize setting for adaptive ITR */
+	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
+			   IXGBE_ITR_ADAPTIVE_LATENCY;
+	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
+			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* intialize ITR */
	if (txr_count && !rxr_count) {

@@ -897,6 +901,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
		q_vector->itr = adapter->rx_itr_setting;
	}

+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
...
@@ -619,12 +619,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
		usleep_range(5000, 10000);
	}

-	/* Failed to get SW only semaphore */
-	if (swmask == IXGBE_GSSR_SW_MNG_SM) {
-		hw_dbg(hw, "Failed to get SW only semaphore\n");
-		return IXGBE_ERR_SWFW_SYNC;
-	}
-
	/* If the resource is not released by the FW/HW the SW can assume that
	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
	 * of the requested resource(s) while ignoring the corresponding FW/HW

@@ -647,7 +641,8 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
	 */
	if (swfw_sync & swmask) {
		u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
-			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+			    IXGBE_GSSR_SW_MNG_SM;

		if (swi2c_mask)
			rmask |= IXGBE_GSSR_I2C_MASK;

@@ -763,6 +758,8 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
 **/
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
{
+	u32 rmask;
+
	/* First try to grab the semaphore but we don't need to bother
	 * looking to see whether we got the lock or not since we do
	 * the same thing regardless of whether we got the lock or not.

@@ -771,6 +768,14 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
	 */
	ixgbe_get_swfw_sync_semaphore(hw);
	ixgbe_release_swfw_sync_semaphore(hw);
+
+	/* Acquire and release all software resources. */
+	rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+		IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+		IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK;
+
+	ixgbe_acquire_swfw_sync_X540(hw, rmask);
+	ixgbe_release_swfw_sync_X540(hw, rmask);
}

/**
...
@@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
		/* convert offset from words to bytes */
		buffer.address = cpu_to_be32((offset + current_word) * 2);
		buffer.length = cpu_to_be16(words_to_read * 2);
+		buffer.pad2 = 0;
+		buffer.pad3 = 0;

		status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
					    IXGBE_HI_COMMAND_TIMEOUT);

@@ -3192,6 +3194,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
+	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+	    ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+		return ret_val;

	/* Setup function pointers based on detected hardware */
	ixgbe_init_mac_link_ops_X550em(hw);

@@ -3394,9 +3399,10 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */
-
-	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+	    status == IXGBE_ERR_PHY_ADDR_INVALID)
+		return status;

	/* start the external PHY */
	if (hw->phy.type == ixgbe_phy_x550em_ext_t) {

@@ -3884,7 +3890,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
	.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
};

-static struct ixgbe_mac_operations mac_ops_x550em_a = {
+static const struct ixgbe_mac_operations mac_ops_x550em_a = {
	X550_COMMON_MAC
	.led_on = ixgbe_led_on_t_x550em,
	.led_off = ixgbe_led_off_t_x550em,

@@ -3905,7 +3911,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
	.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};

-static struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
+static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
	X550_COMMON_MAC
	.led_on = ixgbe_led_on_generic,
	.led_off = ixgbe_led_off_generic,
...