Commit a5ebfe12 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2020-02-19

This series contains updates to e1000e and igc drivers.

Ben Dooks adds a missing cpu_to_le64() in the e1000e transmit ring flush
function.
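
For context, the legacy transmit descriptor's buffer_addr field is a
little-endian __le64, so the CPU-native DMA address must be converted
before it is written into the ring.  A minimal sketch of the corrected
pattern (lifted from the e1000_flush_tx_ring() hunk further below):

	tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
	/* buffer_addr is little-endian on the device side, so convert
	 * the dma_addr_t instead of storing it raw.
	 */
	tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
	tx_desc->lower.data = cpu_to_le32(txd_lower | size);
	tx_desc->upper.data = 0;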

Jia-Ju Bai replaces a couple of usleep_range() calls with udelay() in
e1000e, in code that can be reached while holding a spinlock and so must
not sleep.
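
For reference, udelay() busy-waits and is safe in atomic context, while
usleep_range() may sleep and must not be called with a spinlock held.
After the change the semaphore polling loop looks roughly like this
(the loop body comes from the hunks below; the surrounding loop is
reconstructed from context and therefore approximate):

	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		/* busy-wait rather than sleep: callers may hold a spinlock */
		udelay(100);
		i++;
	}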

Chen Zhou makes 2 functions static in igc.

Sasha finishes the legacy power management support in igc by adding
resume handling and scheduled suspend requests.  Also added register dump
functionality in the igc driver.  Added device id support for the next
generation of i219 devices in e1000e.  Fixed a typo in the igc driver
that referenced a device that is not supported in the driver.  Added the
missing PTP support when suspending now that igc has legacy power
management support.  Added PCIe error detection, slot reset and resume
capability in igc.  Added WoL support for igc as well.  Lastly, added a
code comment to distinguish between interrupt and flag definitions.

Vitaly adds device id support in e1000e for Tiger Lake platforms, which
carry another next-generation i219 device.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5f9721a2 67082b53
@@ -897,6 +897,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
/* fall through */
case e1000_pch_tgp:
case e1000_pch_adp:
mask |= BIT(18);
break;
default:
@@ -1561,6 +1562,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
......
@@ -97,6 +97,11 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
#define E1000_REVISION_4 4
@@ -121,6 +126,7 @@ enum e1000_mac_type {
e1000_pch_spt,
e1000_pch_cnp,
e1000_pch_tgp,
e1000_pch_adp,
};
enum e1000_media_type {
......
@@ -317,6 +317,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -460,6 +461,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -703,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1642,6 +1645,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2095,6 +2099,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3133,6 +3138,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4077,6 +4083,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
......
@@ -1363,7 +1363,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
usleep_range(50, 100);
udelay(100);
i++;
}
@@ -1381,7 +1381,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
usleep_range(50, 100);
udelay(100);
}
if (i == timeout) {
......
@@ -3536,6 +3536,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
break;
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3807,7 +3808,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
tdt = er32(TDT(0));
BUG_ON(tdt != tx_ring->next_to_use);
tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
tx_desc->buffer_addr = tx_ring->dma;
tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
tx_desc->lower.data = cpu_to_le32(txd_lower | size);
tx_desc->upper.data = 0;
@@ -4049,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -7757,6 +7759,11 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
......
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
case e1000_pch_adp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
......
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_ethtool.o igc_ptp.o
igc_ethtool.o igc_ptp.o igc_dump.o
@@ -42,6 +42,10 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -53,10 +57,13 @@ extern char igc_driver_version[];
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -108,7 +115,7 @@ extern char igc_driver_version[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
/* FIXME: These values were estimated using the ones that i210 has as
/* FIXME: These values were estimated using the ones that i225 has as
* basis, they seem to provide good numbers with ptp4l/phc2sys, but we
* need to confirm them.
*/
@@ -552,6 +559,7 @@ int igc_erase_filter(struct igc_adapter *adapter,
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
......
@@ -16,7 +16,10 @@
/* Wake Up Filter Control */
#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -259,6 +262,9 @@
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc.h"
struct igc_reg_info {
u32 ofs;
char *name;
};
static const struct igc_reg_info igc_reg_info_tbl[] = {
/* General Registers */
{IGC_CTRL, "CTRL"},
{IGC_STATUS, "STATUS"},
{IGC_CTRL_EXT, "CTRL_EXT"},
{IGC_MDIC, "MDIC"},
/* Interrupt Registers */
{IGC_ICR, "ICR"},
/* RX Registers */
{IGC_RCTL, "RCTL"},
{IGC_RDLEN(0), "RDLEN"},
{IGC_RDH(0), "RDH"},
{IGC_RDT(0), "RDT"},
{IGC_RXDCTL(0), "RXDCTL"},
{IGC_RDBAL(0), "RDBAL"},
{IGC_RDBAH(0), "RDBAH"},
/* TX Registers */
{IGC_TCTL, "TCTL"},
{IGC_TDBAL(0), "TDBAL"},
{IGC_TDBAH(0), "TDBAH"},
{IGC_TDLEN(0), "TDLEN"},
{IGC_TDH(0), "TDH"},
{IGC_TDT(0), "TDT"},
{IGC_TXDCTL(0), "TXDCTL"},
{IGC_TDFH, "TDFH"},
{IGC_TDFT, "TDFT"},
{IGC_TDFHS, "TDFHS"},
{IGC_TDFPC, "TDFPC"},
/* List Terminator */
{}
};
/* igc_regdump - register printout routine */
static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
{
int n = 0;
char rname[16];
u32 regs[8];
switch (reginfo->ofs) {
case IGC_RDLEN(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDLEN(n));
break;
case IGC_RDH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDH(n));
break;
case IGC_RDT(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDT(n));
break;
case IGC_RXDCTL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RXDCTL(n));
break;
case IGC_RDBAL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDBAL(n));
break;
case IGC_RDBAH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDBAH(n));
break;
case IGC_TDBAL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDBAL(n));
break;
case IGC_TDBAH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDBAH(n));
break;
case IGC_TDLEN(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDLEN(n));
break;
case IGC_TDH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDH(n));
break;
case IGC_TDT(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDT(n));
break;
case IGC_TXDCTL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TXDCTL(n));
break;
default:
pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
regs[2], regs[3]);
}
/* igc_rings_dump - Tx-rings and Rx-rings */
void igc_rings_dump(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct my_u0 { u64 a; u64 b; } *u0;
union igc_adv_tx_desc *tx_desc;
union igc_adv_rx_desc *rx_desc;
struct igc_ring *tx_ring;
struct igc_ring *rx_ring;
u32 staterr;
u16 i, n;
if (!netif_msg_hw(adapter))
return;
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start\n");
pr_info("%-15s %016lX %016lX\n", netdev->name,
netdev->state, dev_trans_start(netdev));
}
/* Print TX Ring Summary */
if (!netdev || !netif_running(netdev))
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igc_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp);
}
/* Print TX Rings */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
/* Transmit Descriptor Formats
*
* Advanced Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +--------------------------------------------------------------+
* 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
* +--------------------------------------------------------------+
* 63 46 45 40 39 38 36 35 32 31 24 15 0
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
struct igc_tx_buffer *buffer_info;
tx_desc = IGC_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
dma_unmap_len(buffer_info, len),
true);
}
}
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info(" %5d %5X %5X\n",
n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
if (!netif_msg_rx_status(adapter))
goto exit;
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
/* Advanced Receive Descriptor (Read) Format
* 63 1 0
* +-----------------------------------------------------+
* 0 | Packet Buffer Address [63:1] |A0/NSE|
* +----------------------------------------------+------+
* 8 | Header Buffer Address [63:1] | DD |
* +-----------------------------------------------------+
*
*
* Advanced Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 30 21 20 17 16 4 3 0
* +------------------------------------------------------+
* 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
* | Checksum Ident | | | | Type | Type |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
struct igc_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGC_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
if (staterr & IGC_RXD_STAT_DD) {
/* Descriptor Done */
pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
"RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
next_desc);
} else {
pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address
(buffer_info->page) +
buffer_info->page_offset,
igc_rx_bufsz(rx_ring),
true);
}
}
}
}
exit:
return;
}
/* igc_regs_dump - registers dump */
void igc_regs_dump(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
struct igc_reg_info *reginfo;
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
pr_info(" Register Name Value\n");
for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
reginfo->name; reginfo++) {
igc_regdump(hw, reginfo);
}
}
@@ -308,6 +308,65 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
}
static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
wol->wolopts = 0;
if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC |
WAKE_PHY;
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
default:
break;
}
if (adapter->wol & IGC_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & IGC_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & IGC_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & IGC_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & IGC_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
}
static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
return -EOPNOTSUPP;
if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
adapter->wol = 0;
if (wol->wolopts & WAKE_UCAST)
adapter->wol |= IGC_WUFC_EX;
if (wol->wolopts & WAKE_MCAST)
adapter->wol |= IGC_WUFC_MC;
if (wol->wolopts & WAKE_BCAST)
adapter->wol |= IGC_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= IGC_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= IGC_WUFC_LNKC;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
}
static u32 igc_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1859,6 +1918,8 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_drvinfo = igc_get_drvinfo,
.get_regs_len = igc_get_regs_len,
.get_regs = igc_get_regs,
.get_wol = igc_get_wol,
.set_wol = igc_set_wol,
.get_msglevel = igc_get_msglevel,
.set_msglevel = igc_set_msglevel,
.nway_reset = igc_nway_reset,
......
@@ -3546,6 +3546,8 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
igc_rings_dump(adapter);
igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
}
@@ -4029,6 +4031,9 @@ static void igc_watchdog_task(struct work_struct *work)
}
}
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
@@ -4114,6 +4119,8 @@ static void igc_watchdog_task(struct work_struct *work)
return;
}
}
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
/* also check for alternate media here */
} else if (!netif_carrier_ok(netdev) &&
@@ -4337,6 +4344,7 @@ static int igc_request_irq(struct igc_adapter *adapter)
static int __igc_open(struct net_device *netdev, bool resuming)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
int err = 0;
int i = 0;
@@ -4348,6 +4356,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return -EBUSY;
}
if (!resuming)
pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -4386,6 +4397,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
rd32(IGC_ICR);
igc_irq_enable(adapter);
if (!resuming)
pm_runtime_put(&pdev->dev);
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
@@ -4404,6 +4418,8 @@ static int __igc_open(struct net_device *netdev, bool resuming)
igc_free_all_tx_resources(adapter);
err_setup_tx:
igc_reset(adapter);
if (!resuming)
pm_runtime_put(&pdev->dev);
return err;
}
@@ -4428,9 +4444,13 @@ static int igc_open(struct net_device *netdev)
static int __igc_close(struct net_device *netdev, bool suspending)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
if (!suspending)
pm_runtime_get_sync(&pdev->dev);
igc_down(adapter);
igc_release_hw_control(adapter);
@@ -4440,6 +4460,9 @@ static int __igc_close(struct net_device *netdev, bool suspending)
igc_free_all_tx_resources(adapter);
igc_free_all_rx_resources(adapter);
if (!suspending)
pm_runtime_put_sync(&pdev->dev);
return 0;
}
@@ -4766,6 +4789,16 @@ static int igc_probe(struct pci_dev *pdev,
hw->fc.requested_mode = igc_fc_default;
hw->fc.current_mode = igc_fc_default;
/* By default, support wake on port A */
adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
/* initialize the wol settings based on the eeprom settings */
if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
adapter->wol |= IGC_WUFC_MAG;
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -4792,6 +4825,10 @@ static int igc_probe(struct pci_dev *pdev,
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_put_noidle(&pdev->dev);
return 0;
err_register:
@@ -4826,6 +4863,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
pm_runtime_get_noresume(&pdev->dev);
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4870,6 +4909,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igc_close(netdev, true);
igc_ptp_suspend(adapter);
igc_clear_interrupt_scheme(adapter);
rtnl_unlock();
@@ -5045,6 +5086,108 @@ static void igc_shutdown(struct pci_dev *pdev)
}
}
/**
* igc_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current PCI connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
**/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/**
* igc_io_slot_reset - called after the PCI bus has been reset.
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
"Could not re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/
hw->hw_addr = adapter->io_addr;
igc_reset(adapter);
wr32(IGC_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
}
return result;
}
/**
* igc_io_resume - called when traffic can start to flow again.
* @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation. Implementation resembles the
* second-half of the igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
dev_err(&pdev->dev, "igc_open failed after reset\n");
rtnl_unlock();	/* don't leak the rtnl lock on the error path */
return;
}
}
netif_device_attach(netdev);
/* let the f/w know that the h/w is now under the control of the
* driver.
*/
igc_get_hw_control(adapter);
rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
.error_detected = igc_io_error_detected,
.slot_reset = igc_io_slot_reset,
.resume = igc_io_resume,
};
#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
@@ -5062,6 +5205,7 @@ static struct pci_driver igc_driver = {
.driver.pm = &igc_pm_ops,
#endif
.shutdown = igc_shutdown,
.err_handler = &igc_err_handler,
};
/**
......
@@ -509,7 +509,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
*/
void igc_ptp_tx_work(struct work_struct *work)
static void igc_ptp_tx_work(struct work_struct *work)
{
struct igc_adapter *adapter = container_of(work, struct igc_adapter,
ptp_tx_work);
......
@@ -17,6 +17,11 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
......