Commit a2f23e08 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-09-15

This series contains updates to ixgbe and fm10k.

Don fixes an ixgbe issue by adding checks for systems that do not have
SFPs to avoid incorrectly acting on interrupts that are falsely
interpreted as SFP events.

Alex Williamson adds a fix for ixgbe to disable SR-IOV prior to
unregistering the netdev to avoid issues with guest OS's which do not
support hot-unplug or their hot-unplug is broken.

Alex Duyck updates the lowest limit for adaptive interrupt
moderation to about 12K interrupts per second for ixgbe.  This change
increases the performance for ixgbe.  Also fixed up fm10k to remove
the optimization that assumed that all fragments would be limited to
page size, since that assumption is incorrect as the TCP allocator can
provide up to a 32K page fragment.  Updated fm10k to add the MAC
address to the list of values recorded on driver load.  Fixes fm10k
so that we only trigger the data path reset if the fabric is ready to
handle traffic to avoid triggering the reset unless the switch API is
ready for us.

Jacob updates the fm10k driver to disable the service task during
suspend and re-enable it after we resume. If we don't do this, the
device could be UP when you suspend and come back from resume as
DOWN.  Also update fm10k to prevent the removal of default VID rules,
and to correctly remove the stack's knowledge of the VLAN while still
forwarding that VID as untagged frames.  If we deleted the VID
rules here, we would begin dropping traffic due to VLAN membership
violations.  Fixed fm10k to use pcie_get_minimum_link(), which is useful
in cases where we connect to a slot at Gen3, but the slot is behind a bus
which is only connected at Gen2.  Updated fm10k to update the netdev
permanent address during reinit instead of up to enable users to
immediately see the new MAC address on the VF even if the device is not
up.  Adds the creation of VLAN interfaces on a device, even while the
device is down for fm10k.  Fixed an issue where we request the incorrect
MAC/VLAN combinations, and prevents us from accidentally reporting some
frames as VLAN tagged.  Provided a couple of trivial fixes for fm10k
to fix code style and typos in code comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c4047f53 9adbac59
...@@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) ...@@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
} }
static void *fm10k_dbg_desc_seq_next(struct seq_file *s, static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
void __always_unused *v, loff_t *pos) void __always_unused *v,
loff_t *pos)
{ {
struct fm10k_ring *ring = s->private; struct fm10k_ring *ring = s->private;
...@@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s, ...@@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
} }
static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s,
__always_unused void *v) void __always_unused *v)
{ {
/* Do nothing. */ /* Do nothing. */
} }
......
...@@ -228,9 +228,6 @@ int fm10k_iov_resume(struct pci_dev *pdev) ...@@ -228,9 +228,6 @@ int fm10k_iov_resume(struct pci_dev *pdev)
hw->iov.ops.set_lport(hw, vf_info, i, hw->iov.ops.set_lport(hw, vf_info, i,
FM10K_VF_FLAG_MULTI_CAPABLE); FM10K_VF_FLAG_MULTI_CAPABLE);
/* assign our default vid to the VF following reset */
vf_info->sw_vid = hw->mac.default_vid;
/* mailbox is disconnected so we don't send a message */ /* mailbox is disconnected so we don't send a message */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info); hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
......
...@@ -497,8 +497,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, ...@@ -497,8 +497,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
if (rx_desc->w.vlan) { if (rx_desc->w.vlan) {
u16 vid = le16_to_cpu(rx_desc->w.vlan); u16 vid = le16_to_cpu(rx_desc->w.vlan);
if (vid != rx_ring->vid) if ((vid & VLAN_VID_MASK) != rx_ring->vid)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
else if (vid & VLAN_PRIO_MASK)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_PRIO_MASK);
} }
fm10k_type_trans(rx_ring, rx_desc, skb); fm10k_type_trans(rx_ring, rx_desc, skb);
...@@ -1079,9 +1082,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, ...@@ -1079,9 +1082,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_tx_buffer *first; struct fm10k_tx_buffer *first;
int tso; int tso;
u32 tx_flags = 0; u32 tx_flags = 0;
#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
unsigned short f; unsigned short f;
#endif
u16 count = TXD_USE_COUNT(skb_headlen(skb)); u16 count = TXD_USE_COUNT(skb_headlen(skb));
/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
...@@ -1089,12 +1090,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, ...@@ -1089,12 +1090,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* + 2 desc gap to keep tail from touching head * + 2 desc gap to keep tail from touching head
* otherwise try next time * otherwise try next time
*/ */
#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
count += skb_shinfo(skb)->nr_frags;
#endif
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++; tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
......
...@@ -758,6 +758,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) ...@@ -758,6 +758,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw; struct fm10k_hw *hw = &interface->hw;
s32 err; s32 err;
int i;
/* updates do not apply to VLAN 0 */ /* updates do not apply to VLAN 0 */
if (!vid) if (!vid)
...@@ -775,8 +776,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) ...@@ -775,8 +776,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (!set) if (!set)
clear_bit(vid, interface->active_vlans); clear_bit(vid, interface->active_vlans);
/* if default VLAN is already present do nothing */ /* disable the default VID on ring if we have an active VLAN */
if (vid == hw->mac.default_vid) for (i = 0; i < interface->num_rx_queues; i++) {
struct fm10k_ring *rx_ring = interface->rx_ring[i];
u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);
if (test_bit(rx_vid, interface->active_vlans))
rx_ring->vid |= FM10K_VLAN_CLEAR;
else
rx_ring->vid &= ~FM10K_VLAN_CLEAR;
}
/* Do not remove default VID related entries from VLAN and MAC tables */
if (!set && vid == hw->mac.default_vid)
return 0;
/* Do not throw an error if the interface is down. We will sync once
* we come up
*/
if (test_bit(__FM10K_DOWN, &interface->state))
return 0; return 0;
fm10k_mbx_lock(interface); fm10k_mbx_lock(interface);
...@@ -996,21 +1014,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) ...@@ -996,21 +1014,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
int xcast_mode; int xcast_mode;
u16 vid, glort; u16 vid, glort;
/* restore our address if perm_addr is set */
if (hw->mac.type == fm10k_mac_vf) {
if (is_valid_ether_addr(hw->mac.perm_addr)) {
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
if (hw->mac.vlan_override)
netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
else
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
/* record glort for this interface */ /* record glort for this interface */
glort = interface->glort; glort = interface->glort;
...@@ -1045,7 +1048,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) ...@@ -1045,7 +1048,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid, true, 0); vid, true, 0);
} }
/* update xcast mode before syncronizing addresses */ /* update xcast mode before synchronizing addresses */
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */ /* synchronize all of the addresses */
......
...@@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface) ...@@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* reassociate interrupts */ /* reassociate interrupts */
fm10k_mbx_request_irq(interface); fm10k_mbx_request_irq(interface);
/* update hardware address for VFs if perm_addr has changed */
if (hw->mac.type == fm10k_mac_vf) {
if (is_valid_ether_addr(hw->mac.perm_addr)) {
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
if (hw->mac.vlan_override)
netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
else
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
/* reset clock */ /* reset clock */
fm10k_ts_reset(interface); fm10k_ts_reset(interface);
...@@ -663,6 +678,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, ...@@ -663,6 +678,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* assign default VLAN to queue */ /* assign default VLAN to queue */
ring->vid = hw->mac.default_vid; ring->vid = hw->mac.default_vid;
/* if we have an active VLAN, disable default VID */
if (test_bit(hw->mac.default_vid, interface->active_vlans))
ring->vid |= FM10K_VLAN_CLEAR;
/* Map interrupt */ /* Map interrupt */
if (ring->q_vector) { if (ring->q_vector) {
rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
...@@ -861,10 +880,12 @@ void fm10k_netpoll(struct net_device *netdev) ...@@ -861,10 +880,12 @@ void fm10k_netpoll(struct net_device *netdev)
#endif #endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break #define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_print_fault(struct fm10k_intfc *interface, int type, static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault) struct fm10k_fault *fault)
{ {
struct pci_dev *pdev = interface->pdev; struct pci_dev *pdev = interface->pdev;
struct fm10k_hw *hw = &interface->hw;
struct fm10k_iov_data *iov_data = interface->iov_data;
char *error; char *error;
switch (type) { switch (type) {
...@@ -918,6 +939,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, int type, ...@@ -918,6 +939,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
"%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n", "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
error, fault->address, fault->specinfo, error, fault->address, fault->specinfo,
PCI_SLOT(fault->func), PCI_FUNC(fault->func)); PCI_SLOT(fault->func), PCI_FUNC(fault->func));
/* For VF faults, clear out the respective LPORT, reset the queue
* resources, and then reconnect to the mailbox. This allows the
* VF in question to resume behavior. For transient faults that are
* the result of non-malicious behavior this will log the fault and
* allow the VF to resume functionality. Obviously for malicious VFs
* they will be able to attempt malicious behavior again. In this
* case, the system administrator will need to step in and manually
* remove or disable the VF in question.
*/
if (fault->func && iov_data) {
int vf = fault->func - 1;
struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];
hw->iov.ops.reset_lport(hw, vf_info);
hw->iov.ops.reset_resources(hw, vf_info);
/* reset_lport disables the VF, so re-enable it */
hw->iov.ops.set_lport(hw, vf_info, vf,
FM10K_VF_FLAG_MULTI_CAPABLE);
/* reset_resources will disconnect from the mbx */
vf_info->mbx.ops.connect(hw, &vf_info->mbx);
}
} }
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
...@@ -941,7 +986,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) ...@@ -941,7 +986,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
continue; continue;
} }
fm10k_print_fault(interface, type, &fault); fm10k_handle_fault(interface, type, &fault);
} }
} }
...@@ -1705,22 +1750,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, ...@@ -1705,22 +1750,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
static void fm10k_slot_warn(struct fm10k_intfc *interface) static void fm10k_slot_warn(struct fm10k_intfc *interface)
{ {
struct device *dev = &interface->pdev->dev; enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
struct fm10k_hw *hw = &interface->hw; struct fm10k_hw *hw = &interface->hw;
int max_gts = 0, expected_gts = 0;
if (hw->mac.ops.is_slot_appropriate(hw)) if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
dev_warn(&interface->pdev->dev,
"Unable to determine PCI Express bandwidth.\n");
return; return;
}
switch (speed) {
case PCIE_SPEED_2_5GT:
/* 8b/10b encoding reduces max throughput by 20% */
max_gts = 2 * width;
break;
case PCIE_SPEED_5_0GT:
/* 8b/10b encoding reduces max throughput by 20% */
max_gts = 4 * width;
break;
case PCIE_SPEED_8_0GT:
/* 128b/130b encoding has less than 2% impact on throughput */
max_gts = 8 * width;
break;
default:
dev_warn(&interface->pdev->dev,
"Unable to determine PCI Express bandwidth.\n");
return;
}
dev_info(&interface->pdev->dev,
"PCI Express bandwidth of %dGT/s available\n",
max_gts);
dev_info(&interface->pdev->dev,
"(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
"Unknown"),
hw->bus.width,
(speed == PCIE_SPEED_2_5GT ? "20%" :
speed == PCIE_SPEED_5_0GT ? "20%" :
speed == PCIE_SPEED_8_0GT ? "<2%" :
"Unknown"),
(hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
"Unknown"));
dev_warn(dev, switch (hw->bus_caps.speed) {
"For optimal performance, a %s %s slot is recommended.\n", case fm10k_bus_speed_2500:
(hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" : /* 8b/10b encoding reduces max throughput by 20% */
hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" : expected_gts = 2 * hw->bus_caps.width;
"x8"), break;
(hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : case fm10k_bus_speed_5000:
hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : /* 8b/10b encoding reduces max throughput by 20% */
"8.0GT/s")); expected_gts = 4 * hw->bus_caps.width;
dev_warn(dev, break;
"A slot with more lanes and/or higher speed is suggested.\n"); case fm10k_bus_speed_8000:
/* 128b/130b encoding has less than 2% impact on throughput */
expected_gts = 8 * hw->bus_caps.width;
break;
default:
dev_warn(&interface->pdev->dev,
"Unable to determine expected PCI Express bandwidth.\n");
return;
}
if (max_gts < expected_gts) {
dev_warn(&interface->pdev->dev,
"This device requires %dGT/s of bandwidth for optimal performance.\n",
expected_gts);
dev_warn(&interface->pdev->dev,
"A %sslot with x%d lanes is suggested.\n",
(hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
hw->bus_caps.width);
}
} }
/** /**
...@@ -1739,7 +1848,6 @@ static int fm10k_probe(struct pci_dev *pdev, ...@@ -1739,7 +1848,6 @@ static int fm10k_probe(struct pci_dev *pdev,
{ {
struct net_device *netdev; struct net_device *netdev;
struct fm10k_intfc *interface; struct fm10k_intfc *interface;
struct fm10k_hw *hw;
int err; int err;
err = pci_enable_device_mem(pdev); err = pci_enable_device_mem(pdev);
...@@ -1783,7 +1891,6 @@ static int fm10k_probe(struct pci_dev *pdev, ...@@ -1783,7 +1891,6 @@ static int fm10k_probe(struct pci_dev *pdev,
interface->netdev = netdev; interface->netdev = netdev;
interface->pdev = pdev; interface->pdev = pdev;
hw = &interface->hw;
interface->uc_addr = ioremap(pci_resource_start(pdev, 0), interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
FM10K_UC_ADDR_SIZE); FM10K_UC_ADDR_SIZE);
...@@ -1825,24 +1932,12 @@ static int fm10k_probe(struct pci_dev *pdev, ...@@ -1825,24 +1932,12 @@ static int fm10k_probe(struct pci_dev *pdev,
/* Register PTP interface */ /* Register PTP interface */
fm10k_ptp_register(interface); fm10k_ptp_register(interface);
/* print bus type/speed/width info */
dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
(hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
"Unknown"),
(hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
"Unknown"),
(hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
"Unknown"));
/* print warning for non-optimal configurations */ /* print warning for non-optimal configurations */
fm10k_slot_warn(interface); fm10k_slot_warn(interface);
/* report MAC address for logging */
dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
/* enable SR-IOV after registering netdev to enforce PF/VF ordering */ /* enable SR-IOV after registering netdev to enforce PF/VF ordering */
fm10k_iov_configure(pdev, 0); fm10k_iov_configure(pdev, 0);
...@@ -1983,6 +2078,16 @@ static int fm10k_resume(struct pci_dev *pdev) ...@@ -1983,6 +2078,16 @@ static int fm10k_resume(struct pci_dev *pdev)
if (err) if (err)
return err; return err;
/* assume host is not ready, to prevent race with watchdog in case we
* actually don't have connection to the switch
*/
interface->host_ready = false;
fm10k_watchdog_host_not_ready(interface);
/* clear the service task disable bit to allow service task to start */
clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
fm10k_service_event_schedule(interface);
/* restore SR-IOV interface */ /* restore SR-IOV interface */
fm10k_iov_resume(pdev); fm10k_iov_resume(pdev);
...@@ -2010,6 +2115,15 @@ static int fm10k_suspend(struct pci_dev *pdev, ...@@ -2010,6 +2115,15 @@ static int fm10k_suspend(struct pci_dev *pdev,
fm10k_iov_suspend(pdev); fm10k_iov_suspend(pdev);
/* the watchdog tasks may read registers, which will appear like a
* surprise-remove event once the PCI device is disabled. This will
* cause us to close the netdevice, so we don't retain the open/closed
* state post-resume. Prevent this by disabling the service task while
* suspended, until we actually resume.
*/
set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
cancel_work_sync(&interface->service_task);
rtnl_lock(); rtnl_lock();
if (netif_running(netdev)) if (netif_running(netdev))
......
...@@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) ...@@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING; return FM10K_ERR_DMA_PENDING;
/* verify the switch is ready for reset */
reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
goto out;
/* Inititate data path reset */ /* Inititate data path reset */
reg |= FM10K_DMA_CTRL_DATAPATH_RESET; reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
...@@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) ...@@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
if (!(reg & FM10K_IP_NOTINRESET)) if (!(reg & FM10K_IP_NOTINRESET))
err = FM10K_ERR_RESET_FAILED; err = FM10K_ERR_RESET_FAILED;
out:
return err; return err;
} }
...@@ -184,19 +190,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) ...@@ -184,19 +190,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
return 0; return 0;
} }
/**
* fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
* @hw: pointer to hardware structure
*
* Looks at the PCIe bus info to confirm whether or not this slot can support
* the necessary bandwidth for this device.
**/
static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
{
return (hw->bus.speed == hw->bus_caps.speed) &&
(hw->bus.width == hw->bus_caps.width);
}
/** /**
* fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
* @hw: pointer to hardware structure * @hw: pointer to hardware structure
...@@ -1161,6 +1154,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, ...@@ -1161,6 +1154,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
return hw->iov.ops.assign_int_moderator(hw, vf_idx); return hw->iov.ops.assign_int_moderator(hw, vf_idx);
} }
/**
* fm10k_iov_select_vid - Select correct default VID
* @hw: Pointer to hardware structure
* @vid: VID to correct
*
* Will report an error if VID is out of range. For VID = 0, it will return
* either the pf_vid or sw_vid depending on which one is set.
*/
static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
{
if (!vid)
return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
else if (vf_info->pf_vid && vid != vf_info->pf_vid)
return FM10K_ERR_PARAM;
else
return vid;
}
/** /**
* fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
* @hw: Pointer to hardware structure * @hw: Pointer to hardware structure
...@@ -1175,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, ...@@ -1175,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx) struct fm10k_mbx_info *mbx)
{ {
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
int err = 0;
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
u32 *result; u32 *result;
int err = 0;
bool set;
u16 vlan; u16 vlan;
u32 vid; u32 vid;
...@@ -1193,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, ...@@ -1193,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err) if (err)
return err; return err;
/* if VLAN ID is 0, set the default VLAN ID instead of 0 */ /* verify upper 16 bits are zero */
if (!vid || (vid == FM10K_VLAN_CLEAR)) { if (vid >> 16)
if (vf_info->pf_vid)
vid |= vf_info->pf_vid;
else
vid |= vf_info->sw_vid;
} else if (vid != vf_info->pf_vid) {
return FM10K_ERR_PARAM; return FM10K_ERR_PARAM;
}
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
err = fm10k_iov_select_vid(vf_info, vid);
if (err < 0)
return err;
else
vid = err;
/* update VSI info for VF in regards to VLAN table */ /* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
!(vid & FM10K_VLAN_CLEAR));
} }
if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
...@@ -1221,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, ...@@ -1221,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
memcmp(mac, vf_info->mac, ETH_ALEN)) memcmp(mac, vf_info->mac, ETH_ALEN))
return FM10K_ERR_PARAM; return FM10K_ERR_PARAM;
/* if VLAN ID is 0, set the default VLAN ID instead of 0 */ set = !(vlan & FM10K_VLAN_CLEAR);
if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { vlan &= ~FM10K_VLAN_CLEAR;
if (vf_info->pf_vid)
vlan |= vf_info->pf_vid; err = fm10k_iov_select_vid(vf_info, vlan);
else if (err < 0)
vlan |= vf_info->sw_vid; return err;
} else if (vf_info->pf_vid) { else
return FM10K_ERR_PARAM; vlan = err;
}
/* notify switch of request for new unicast address */ /* notify switch of request for new unicast address */
err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan, err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
!(vlan & FM10K_VLAN_CLEAR), 0); mac, vlan, set, 0);
} }
if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
...@@ -1248,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, ...@@ -1248,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
return FM10K_ERR_PARAM; return FM10K_ERR_PARAM;
/* if VLAN ID is 0, set the default VLAN ID instead of 0 */ set = !(vlan & FM10K_VLAN_CLEAR);
if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { vlan &= ~FM10K_VLAN_CLEAR;
if (vf_info->pf_vid)
vlan |= vf_info->pf_vid; err = fm10k_iov_select_vid(vf_info, vlan);
else if (err < 0)
vlan |= vf_info->sw_vid; return err;
} else if (vf_info->pf_vid) { else
return FM10K_ERR_PARAM; vlan = err;
}
/* notify switch of request for new multicast address */ /* notify switch of request for new multicast address */
err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
!(vlan & FM10K_VLAN_CLEAR)); mac, vlan, set);
} }
return err; return err;
...@@ -1849,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = { ...@@ -1849,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = {
.init_hw = &fm10k_init_hw_pf, .init_hw = &fm10k_init_hw_pf,
.start_hw = &fm10k_start_hw_generic, .start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_generic, .stop_hw = &fm10k_stop_hw_generic,
.is_slot_appropriate = &fm10k_is_slot_appropriate_pf,
.update_vlan = &fm10k_update_vlan_pf, .update_vlan = &fm10k_update_vlan_pf,
.read_mac_addr = &fm10k_read_mac_addr_pf, .read_mac_addr = &fm10k_read_mac_addr_pf,
.update_uc_addr = &fm10k_update_uc_addr_pf, .update_uc_addr = &fm10k_update_uc_addr_pf,
......
...@@ -521,7 +521,6 @@ struct fm10k_mac_ops { ...@@ -521,7 +521,6 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *); s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *); s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *); s32 (*get_host_state)(struct fm10k_hw *, bool *);
bool (*is_slot_appropriate)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *); s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
......
...@@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) ...@@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
return 0; return 0;
} }
/**
* fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
* @hw: pointer to hardware structure
*
* Looks at the PCIe bus info to confirm whether or not this slot can support
* the necessary bandwidth for this device. Since the VF has no control over
* the "slot" it is in, always indicate that the slot is appropriate.
**/
static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
{
return true;
}
/* This structure defines the attibutes to be parsed below */ /* This structure defines the attibutes to be parsed below */
const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
...@@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = { ...@@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = {
.init_hw = &fm10k_init_hw_vf, .init_hw = &fm10k_init_hw_vf,
.start_hw = &fm10k_start_hw_generic, .start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_vf, .stop_hw = &fm10k_stop_hw_vf,
.is_slot_appropriate = &fm10k_is_slot_appropriate_vf,
.update_vlan = &fm10k_update_vlan_vf, .update_vlan = &fm10k_update_vlan_vf,
.read_mac_addr = &fm10k_read_mac_addr_vf, .read_mac_addr = &fm10k_read_mac_addr_vf,
.update_uc_addr = &fm10k_update_uc_addr_vf, .update_uc_addr = &fm10k_update_uc_addr_vf,
......
...@@ -539,8 +539,7 @@ struct hwmon_buff { ...@@ -539,8 +539,7 @@ struct hwmon_buff {
#define IXGBE_MIN_RSC_ITR 24 #define IXGBE_MIN_RSC_ITR 24
#define IXGBE_100K_ITR 40 #define IXGBE_100K_ITR 40
#define IXGBE_20K_ITR 200 #define IXGBE_20K_ITR 200
#define IXGBE_10K_ITR 400 #define IXGBE_12K_ITR 336
#define IXGBE_8K_ITR 500
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
......
...@@ -2286,7 +2286,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ...@@ -2286,7 +2286,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_itr_setting = ec->tx_coalesce_usecs; adapter->tx_itr_setting = ec->tx_coalesce_usecs;
if (adapter->tx_itr_setting == 1) if (adapter->tx_itr_setting == 1)
tx_itr_param = IXGBE_10K_ITR; tx_itr_param = IXGBE_12K_ITR;
else else
tx_itr_param = adapter->tx_itr_setting; tx_itr_param = adapter->tx_itr_setting;
......
...@@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ...@@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (txr_count && !rxr_count) { if (txr_count && !rxr_count) {
/* tx only vector */ /* tx only vector */
if (adapter->tx_itr_setting == 1) if (adapter->tx_itr_setting == 1)
q_vector->itr = IXGBE_10K_ITR; q_vector->itr = IXGBE_12K_ITR;
else else
q_vector->itr = adapter->tx_itr_setting; q_vector->itr = adapter->tx_itr_setting;
} else { } else {
......
...@@ -2261,7 +2261,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, ...@@ -2261,7 +2261,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
/* simple throttlerate management /* simple throttlerate management
* 0-10MB/s lowest (100000 ints/s) * 0-10MB/s lowest (100000 ints/s)
* 10-20MB/s low (20000 ints/s) * 10-20MB/s low (20000 ints/s)
* 20-1249MB/s bulk (8000 ints/s) * 20-1249MB/s bulk (12000 ints/s)
*/ */
/* what was last interrupt timeslice? */ /* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2; timepassed_us = q_vector->itr >> 2;
...@@ -2350,7 +2350,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) ...@@ -2350,7 +2350,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
new_itr = IXGBE_20K_ITR; new_itr = IXGBE_20K_ITR;
break; break;
case bulk_latency: case bulk_latency:
new_itr = IXGBE_8K_ITR; new_itr = IXGBE_12K_ITR;
break; break;
default: default:
break; break;
...@@ -2495,17 +2495,26 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) ...@@ -2495,17 +2495,26 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{ {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
if (eicr & IXGBE_EICR_GPI_SDP2(hw)) { if (!ixgbe_is_sfp(hw))
return;
/* Later MAC's use different SDP */
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
if (eicr & eicr_mask) {
/* Clear the interrupt */ /* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw)); IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) { if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
ixgbe_service_event_schedule(adapter); ixgbe_service_event_schedule(adapter);
} }
} }
if (eicr & IXGBE_EICR_GPI_SDP1(hw)) { if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
(eicr & IXGBE_EICR_GPI_SDP1(hw))) {
/* Clear the interrupt */ /* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) { if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
...@@ -9019,12 +9028,12 @@ static void ixgbe_remove(struct pci_dev *pdev) ...@@ -9019,12 +9028,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
/* remove the added san mac */ /* remove the added san mac */
ixgbe_del_sanmac_netdev(netdev); ixgbe_del_sanmac_netdev(netdev);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
ixgbe_disable_sriov(adapter); ixgbe_disable_sriov(adapter);
#endif #endif
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
ixgbe_clear_interrupt_scheme(adapter); ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter); ixgbe_release_hw_control(adapter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment