Commit 31d035a0 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2016-02-15

This series contains updates to igb only.

Shota Suzuki cleans up unnecessary flag setting for 82576 in
igb_set_flag_queue_pairs() since the default block already sets
IGB_FLAG_QUEUE_PAIRS to the correct value anyways, so the e1000_82576
code block is not necessary and we can simply fall through.  Then fixes
an issue where IGB_FLAG_QUEUE_PAIRS can now be set by using "ethtool -L"
option but is never cleared unless the driver is reloaded, so clear the
queue pairing if the pairing becomes unnecessary as a result of "ethtool
-L".

Mitch fixes the igbvf from giving up if it fails to get the hardware
mailbox lock.  This can happen when the PF-VF communication channel is
heavily loaded and causes complete communications failure between the
PF and VF drivers, so add a counter and a delay so that the driver will
now retry ten times before giving up on getting the mailbox lock.

The remaining patches in the series are from Alex Duyck, starting with the
cleaning up code that sets the MAC address.  Then refactors the VFTA and
VLVF configuration, to simplify and update to similar setups in the ixgbe
driver.  Fixed an issue where the VLAN header size was being added to the
value programmed into the RLPML registers, yet these registers already
take into account the size of the VLAN headers when determining the
maximum packet length, so we can drop the code that adds the size to
the RLPML registers.  Cleaned up the configuration of the VF port based
VLAN configuration.  Also fixed the igb driver so that we can fully
support SR-IOV or the recently added NTUPLE filtering while allowing
support for VLAN promiscuous mode.  Also added the ability to use the
bridge utility to add a FDB entry for the PF to an igb port.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9a14b1c2 bf456abb
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include "e1000_mac.h" #include "e1000_mac.h"
#include "e1000_82575.h" #include "e1000_82575.h"
#include "e1000_i210.h" #include "e1000_i210.h"
#include "igb.h"
static s32 igb_get_invariants_82575(struct e1000_hw *); static s32 igb_get_invariants_82575(struct e1000_hw *);
static s32 igb_acquire_phy_82575(struct e1000_hw *); static s32 igb_acquire_phy_82575(struct e1000_hw *);
...@@ -71,6 +72,32 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); ...@@ -71,6 +72,32 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] = { static const u16 e1000_82580_rxpbs_table[] = {
36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/* Due to a hw errata, if the host tries to configure the VFTA register
* while performing queries from the BMC or DMA, then the VFTA in some
* cases won't be written.
*/
/**
* igb_write_vfta_i350 - Write value to VLAN filter table
* @hw: pointer to the HW structure
* @offset: register offset in VLAN filter table
* @value: register value written to VLAN filter table
*
* Writes value at the given offset in the register array which stores
* the VLAN filter table.  As a workaround for the i350/i354 errata
* described above, the register write is repeated ten times so that at
* least one write takes effect.  A software shadow copy of the entry is
* kept in the adapter structure so later updates can read-modify-write
* without touching the hardware.
**/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
struct igb_adapter *adapter = hw->back;
int i;
/* repeat the write to defeat the BMC/DMA-query errata */
for (i = 10; i--;)
array_wr32(E1000_VFTA, offset, value);
/* flush posted writes before recording the shadow value */
wrfl();
adapter->shadow_vfta[offset] = value;
}
/** /**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
...@@ -398,6 +425,8 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) ...@@ -398,6 +425,8 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
/* Set mta register count */ /* Set mta register count */
mac->mta_reg_count = 128; mac->mta_reg_count = 128;
/* Set uta register count */
mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
/* Set rar entry count */ /* Set rar entry count */
switch (mac->type) { switch (mac->type) {
case e1000_82576: case e1000_82576:
...@@ -429,6 +458,11 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) ...@@ -429,6 +458,11 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
} }
if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
mac->ops.write_vfta = igb_write_vfta_i350;
else
mac->ops.write_vfta = igb_write_vfta;
/* Set if part includes ASF firmware */ /* Set if part includes ASF firmware */
mac->asf_firmware_present = true; mac->asf_firmware_present = true;
/* Set if manageability features are enabled. */ /* Set if manageability features are enabled. */
...@@ -1517,9 +1551,6 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) ...@@ -1517,9 +1551,6 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
/* Disabling VLAN filtering */ /* Disabling VLAN filtering */
hw_dbg("Initializing the IEEE VLAN\n"); hw_dbg("Initializing the IEEE VLAN\n");
if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
igb_clear_vfta_i350(hw);
else
igb_clear_vfta(hw); igb_clear_vfta(hw);
/* Setup the receive address */ /* Setup the receive address */
......
...@@ -356,7 +356,8 @@ ...@@ -356,7 +356,8 @@
/* Ethertype field values */ /* Ethertype field values */
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
#define MAX_JUMBO_FRAME_SIZE 0x3F00 /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
/* PBA constants */ /* PBA constants */
#define E1000_PBA_34K 0x0022 #define E1000_PBA_34K 0x0022
......
...@@ -325,7 +325,7 @@ struct e1000_mac_operations { ...@@ -325,7 +325,7 @@ struct e1000_mac_operations {
s32 (*get_thermal_sensor_data)(struct e1000_hw *); s32 (*get_thermal_sensor_data)(struct e1000_hw *);
s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
#endif #endif
void (*write_vfta)(struct e1000_hw *, u32, u32);
}; };
struct e1000_phy_operations { struct e1000_phy_operations {
......
...@@ -92,10 +92,8 @@ void igb_clear_vfta(struct e1000_hw *hw) ...@@ -92,10 +92,8 @@ void igb_clear_vfta(struct e1000_hw *hw)
{ {
u32 offset; u32 offset;
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
array_wr32(E1000_VFTA, offset, 0); hw->mac.ops.write_vfta(hw, offset, 0);
wrfl();
}
} }
/** /**
...@@ -107,54 +105,14 @@ void igb_clear_vfta(struct e1000_hw *hw) ...@@ -107,54 +105,14 @@ void igb_clear_vfta(struct e1000_hw *hw)
* Writes value at the given offset in the register array which stores * Writes value at the given offset in the register array which stores
* the VLAN filter table. * the VLAN filter table.
**/ **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
array_wr32(E1000_VFTA, offset, value);
wrfl();
}
/* Due to a hw errata, if the host tries to configure the VFTA register
* while performing queries from the BMC or DMA, then the VFTA in some
* cases won't be written.
*/
/**
* igb_clear_vfta_i350 - Clear VLAN filter table
* @hw: pointer to the HW structure
*
* Clears the register array which contains the VLAN filter table by
* setting all the values to 0.
**/
void igb_clear_vfta_i350(struct e1000_hw *hw)
{
u32 offset;
int i;
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
for (i = 0; i < 10; i++)
array_wr32(E1000_VFTA, offset, 0);
wrfl();
}
}
/**
* igb_write_vfta_i350 - Write value to VLAN filter table
* @hw: pointer to the HW structure
* @offset: register offset in VLAN filter table
* @value: register value written to VLAN filter table
*
* Writes value at the given offset in the register array which stores
* the VLAN filter table.
**/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{ {
int i; struct igb_adapter *adapter = hw->back;
for (i = 0; i < 10; i++)
array_wr32(E1000_VFTA, offset, value); array_wr32(E1000_VFTA, offset, value);
wrfl(); wrfl();
adapter->shadow_vfta[offset] = value;
} }
/** /**
...@@ -182,41 +140,156 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) ...@@ -182,41 +140,156 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
hw->mac.ops.rar_set(hw, mac_addr, i); hw->mac.ops.rar_set(hw, mac_addr, i);
} }
/**
* igb_find_vlvf_slot - find the VLAN id or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
* @vlvf_bypass: skip VLVF if no match is found
*
* return the VLVF index where this VLAN id should be placed, or a
* negative error code (-E1000_ERR_NO_SPACE) if no slot is available.
* VLAN id 0 always maps to index 0.
*
**/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
s32 regindex, first_empty_slot;
u32 bits;
/* short cut the special case */
if (vlan == 0)
return 0;
/* if vlvf_bypass is set we don't want to use an empty slot, we
* will simply bypass the VLVF if there are no entries present in the
* VLVF that contain our VLAN
*/
first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;
/* Search for the VLAN id in the VLVF entries. Save off the first empty
* slot found along the way.
*
* pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1;
* entry 0 is never scanned since it is reserved for VLAN id 0
*/
for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
if (bits == vlan)
return regindex;
if (!first_empty_slot && !bits)
first_empty_slot = regindex;
}
/* no match found: use the first empty slot, or report no space */
return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
/** /**
* igb_vfta_set - enable or disable vlan in VLAN filter table * igb_vfta_set - enable or disable vlan in VLAN filter table
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @vid: VLAN id to add or remove * @vlan: VLAN id to add or remove
* @add: if true add filter, if false remove * @vind: VMDq output index that maps queue to VLAN id
* @vlan_on: if true add filter, if false remove
* *
* Sets or clears a bit in the VLAN filter table array based on VLAN id * Sets or clears a bit in the VLAN filter table array based on VLAN id
* and if we are adding or removing the filter * and if we are adding or removing the filter
**/ **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{ {
u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
u32 vfta;
struct igb_adapter *adapter = hw->back; struct igb_adapter *adapter = hw->back;
s32 ret_val = 0; u32 regidx, vfta_delta, vfta, bits;
s32 vlvf_index;
vfta = adapter->shadow_vfta[index]; if ((vlan > 4095) || (vind > 7))
return -E1000_ERR_PARAM;
/* bit was set/cleared before we started */ /* this is a 2 part operation - first the VFTA, then the
if ((!!(vfta & mask)) == add) { * VLVF and VLVFB if VT Mode is set
ret_val = -E1000_ERR_CONFIG; * We don't write the VFTA until we know the VLVF part succeeded.
} else { */
if (add)
vfta |= mask; /* Part 1
else * The VFTA is a bitstring made up of 128 32-bit registers
vfta &= ~mask; * that enable the particular VLAN id, much like the MTA:
* bits[11-5]: which register
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
vfta_delta = 1 << (vlan % 32);
vfta = adapter->shadow_vfta[regidx];
/* vfta_delta represents the difference between the current value
* of vfta and the value we want in the register. Since the diff
* is an XOR mask we can just update vfta using an XOR.
*/
vfta_delta &= vlan_on ? ~vfta : vfta;
vfta ^= vfta_delta;
/* Part 2
* If VT Mode is set
* Either vlan_on
* make sure the VLAN is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
if (!adapter->vfs_allocated_count)
goto vfta_update;
vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
if (vlvf_index < 0) {
if (vlvf_bypass)
goto vfta_update;
return vlvf_index;
} }
if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
igb_write_vfta_i350(hw, index, vfta);
else
igb_write_vfta(hw, index, vfta);
adapter->shadow_vfta[index] = vfta;
return ret_val; bits = rd32(E1000_VLVF(vlvf_index));
/* set the pool bit */
bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
/* Clear VFTA first, then disable VLVF. Otherwise
* we run the risk of stray packets leaking into
* the PF via the default pool
*/
if (vfta_delta)
hw->mac.ops.write_vfta(hw, regidx, vfta);
/* disable VLVF and clear remaining bit from pool */
wr32(E1000_VLVF(vlvf_index), 0);
return 0;
}
/* If there are still bits set in the VLVFB registers
* for the VLAN ID indicated we need to see if the
* caller is requesting that we clear the VFTA entry bit.
* If the caller has requested that we clear the VFTA
* entry bit but there are still pools/VFs using this VLAN
* ID entry then ignore the request. We're not worried
* about the case where we're turning the VFTA VLAN ID
* entry bit on, only when requested to turn it off as
* there may be multiple pools and/or VFs using the
* VLAN ID entry. In that case we cannot clear the
* VFTA bit until all pools/VFs using that VLAN ID have also
* been cleared. This will be indicated by "bits" being
* zero.
*/
vfta_delta = 0;
vlvf_update:
/* record pool change and enable VLAN ID if not already enabled */
wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);
vfta_update:
/* bit was set/cleared before we started */
if (vfta_delta)
hw->mac.ops.write_vfta(hw, regidx, vfta);
return 0;
} }
/** /**
......
...@@ -56,8 +56,9 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, ...@@ -56,8 +56,9 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
void igb_clear_hw_cntrs_base(struct e1000_hw *hw); void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw); void igb_clear_vfta(struct e1000_hw *hw);
void igb_clear_vfta_i350(struct e1000_hw *hw); void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind,
bool vlan_on, bool vlvf_bypass);
void igb_config_collision_dist(struct e1000_hw *hw); void igb_config_collision_dist(struct e1000_hw *hw);
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
void igb_mta_set(struct e1000_hw *hw, u32 hash_value); void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
......
...@@ -322,14 +322,20 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) ...@@ -322,14 +322,20 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{ {
s32 ret_val = -E1000_ERR_MBX; s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox; u32 p2v_mailbox;
int count = 10;
do {
/* Take ownership of the buffer */ /* Take ownership of the buffer */
wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
/* reserve mailbox for vf use */ /* reserve mailbox for vf use */
p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
if (p2v_mailbox & E1000_P2VMAILBOX_PFU) if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
ret_val = 0; ret_val = 0;
break;
}
udelay(1000);
} while (count-- > 0);
return ret_val; return ret_val;
} }
......
...@@ -95,7 +95,6 @@ struct vf_data_storage { ...@@ -95,7 +95,6 @@ struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN]; unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes; u16 num_vf_mc_hashes;
u16 vlans_enabled;
u32 flags; u32 flags;
unsigned long last_nack; unsigned long last_nack;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
...@@ -482,6 +481,7 @@ struct igb_adapter { ...@@ -482,6 +481,7 @@ struct igb_adapter {
#define IGB_FLAG_MAS_ENABLE (1 << 12) #define IGB_FLAG_MAS_ENABLE (1 << 12)
#define IGB_FLAG_HAS_MSIX (1 << 13) #define IGB_FLAG_HAS_MSIX (1 << 13)
#define IGB_FLAG_EEE (1 << 14) #define IGB_FLAG_EEE (1 << 14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
/* Media Auto Sense */ /* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001 #define IGB_MAS_ENABLE_0 0X0001
......
...@@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, ...@@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats); struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int); static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *); static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter); static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *);
...@@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter) ...@@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter)
static void igb_update_mng_vlan(struct igb_adapter *adapter) static void igb_update_mng_vlan(struct igb_adapter *adapter)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u16 pf_id = adapter->vfs_allocated_count;
u16 vid = adapter->hw.mng_cookie.vlan_id; u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id; u16 old_vid = adapter->mng_vlan_id;
if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
/* add VID to filter table */ /* add VID to filter table */
igb_vfta_set(hw, vid, true); igb_vfta_set(hw, vid, pf_id, true, true);
adapter->mng_vlan_id = vid; adapter->mng_vlan_id = vid;
} else { } else {
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
...@@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) ...@@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
(vid != old_vid) && (vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans)) { !test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */ /* remove VID from filter table */
igb_vfta_set(hw, old_vid, false); igb_vfta_set(hw, vid, pf_id, false, true);
} }
} }
...@@ -1818,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter) ...@@ -1818,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter)
if (!pci_channel_offline(adapter->pdev)) if (!pci_channel_offline(adapter->pdev))
igb_reset(adapter); igb_reset(adapter);
/* clear VLAN promisc flag so VFTA will be updated if necessary */
adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
igb_clean_all_tx_rings(adapter); igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter); igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA #ifdef CONFIG_IGB_DCA
...@@ -1862,7 +1867,7 @@ void igb_reset(struct igb_adapter *adapter) ...@@ -1862,7 +1867,7 @@ void igb_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac; struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc; struct e1000_fc_info *fc = &hw->fc;
u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; u32 pba, hwm;
/* Repartition Pba for greater than 9k mtu /* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required. * To take effect CTRL.RST is required.
...@@ -1886,9 +1891,10 @@ void igb_reset(struct igb_adapter *adapter) ...@@ -1886,9 +1891,10 @@ void igb_reset(struct igb_adapter *adapter)
break; break;
} }
if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && if (mac->type == e1000_82575) {
(mac->type < e1000_82576)) { u32 min_rx_space, min_tx_space, needed_tx_space;
/* adjust PBA for jumbo frames */
/* write Rx PBA so that hardware can report correct Tx PBA */
wr32(E1000_PBA, pba); wr32(E1000_PBA, pba);
/* To maintain wire speed transmits, the Tx FIFO should be /* To maintain wire speed transmits, the Tx FIFO should be
...@@ -1898,31 +1904,26 @@ void igb_reset(struct igb_adapter *adapter) ...@@ -1898,31 +1904,26 @@ void igb_reset(struct igb_adapter *adapter)
* one full receive packet and is similarly rounded up and * one full receive packet and is similarly rounded up and
* expressed in KB. * expressed in KB.
*/ */
pba = rd32(E1000_PBA); min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16; /* The Tx FIFO also stores 16 bytes of information about the Tx
/* lower 16 bits has Rx packet buffer allocation size in KB */ * but don't include Ethernet FCS because hardware appends it.
pba &= 0xffff; * We only need to round down to the nearest 512 byte block
/* the Tx fifo also stores 16 bytes of information about the Tx * count since the value we care about is 2 frames, not 1.
* but don't include ethernet FCS because hardware appends it
*/ */
min_tx_space = (adapter->max_frame_size + min_tx_space = adapter->max_frame_size;
sizeof(union e1000_adv_tx_desc) - min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
ETH_FCS_LEN) * 2; min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10; /* upper 16 bits has Tx packet buffer allocation size in KB */
/* software strips receive CRC, so leave room for it */ needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
min_rx_space = adapter->max_frame_size;
min_rx_space = ALIGN(min_rx_space, 1024);
min_rx_space >>= 10;
/* If current Tx allocation is less than the min Tx FIFO size, /* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO * and the min Tx FIFO size is less than the current Rx FIFO
* allocation, take space away from current Rx allocation * allocation, take space away from current Rx allocation.
*/ */
if (tx_space < min_tx_space && if (needed_tx_space < pba) {
((min_tx_space - tx_space) < pba)) { pba -= needed_tx_space;
pba = pba - (min_tx_space - tx_space);
/* if short on Rx space, Rx wins and must trump Tx /* if short on Rx space, Rx wins and must trump Tx
* adjustment * adjustment
...@@ -1930,18 +1931,20 @@ void igb_reset(struct igb_adapter *adapter) ...@@ -1930,18 +1931,20 @@ void igb_reset(struct igb_adapter *adapter)
if (pba < min_rx_space) if (pba < min_rx_space)
pba = min_rx_space; pba = min_rx_space;
} }
/* adjust PBA for jumbo frames */
wr32(E1000_PBA, pba); wr32(E1000_PBA, pba);
} }
/* flow control settings */ /* flow control settings
/* The high water mark must be low enough to fit one full frame * The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO. * after transmitting the pause frame. As such we must have enough
* Set it to the lower of: * space to allow for us to complete our current transmit and then
* - 90% of the Rx FIFO size, or * receive the frame that is in progress from the link partner.
* - the full Rx FIFO size minus one full frame * Set it to:
* - the full Rx FIFO size minus one full Tx plus one full Rx frame
*/ */
hwm = min(((pba << 10) * 9 / 10), hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
((pba << 10) - 2 * adapter->max_frame_size));
fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16; fc->low_water = fc->high_water - 16;
...@@ -2051,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev, ...@@ -2051,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
igb_vlan_mode(netdev, features); igb_vlan_mode(netdev, features);
if (!(changed & NETIF_F_RXALL)) if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0; return 0;
netdev->features = features; netdev->features = features;
...@@ -2064,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev, ...@@ -2064,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev,
return 0; return 0;
} }
/**
 * igb_ndo_fdb_add - add an FDB entry for this port
 * @ndm: netlink neighbour message
 * @tb: netlink attribute table
 * @dev: net device the entry is added for
 * @addr: MAC address of the entry
 * @vid: VLAN id of the entry
 * @flags: NLM_F_* flags from the request
 *
 * Unicast (including link-local) entries must be backed by a dedicated
 * receive address register, so the add is refused with -ENOMEM once the
 * RAR entries usable by the PF are exhausted.  The actual bookkeeping is
 * delegated to the core default FDB handler.
 */
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 vid,
			   u16 flags)
{
	/* guarantee we can provide a unique filter for the unicast address */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		struct igb_adapter *adapter = netdev_priv(dev);
		int avail = adapter->hw.mac.rar_entry_count -
			    (adapter->vfs_allocated_count + 1);

		if (netdev_uc_count(dev) >= avail)
			return -ENOMEM;
	}

	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
static const struct net_device_ops igb_netdev_ops = { static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open, .ndo_open = igb_open,
.ndo_stop = igb_close, .ndo_stop = igb_close,
...@@ -2087,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = { ...@@ -2087,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = {
#endif #endif
.ndo_fix_features = igb_fix_features, .ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features, .ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = passthru_features_check, .ndo_features_check = passthru_features_check,
}; };
...@@ -2921,14 +2944,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, ...@@ -2921,14 +2944,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
/* Device supports enough interrupts without queue pairing. */ /* Device supports enough interrupts without queue pairing. */
break; break;
case e1000_82576: case e1000_82576:
/* If VFs are going to be allocated with RSS queues then we
* should pair the queues in order to conserve interrupts due
* to limited supply.
*/
if ((adapter->rss_queues > 1) &&
(adapter->vfs_allocated_count > 6))
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
/* fall through */
case e1000_82580: case e1000_82580:
case e1000_i350: case e1000_i350:
case e1000_i354: case e1000_i354:
...@@ -2939,6 +2954,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, ...@@ -2939,6 +2954,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
*/ */
if (adapter->rss_queues > (max_rss_queues / 2)) if (adapter->rss_queues > (max_rss_queues / 2))
adapter->flags |= IGB_FLAG_QUEUE_PAIRS; adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
else
adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
break; break;
} }
} }
...@@ -3498,7 +3515,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) ...@@ -3498,7 +3515,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
/* disable store bad packets and clear size bits. */ /* disable store bad packets and clear size bits. */
rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
/* enable LPE to prevent packets larger than max_frame_size */ /* enable LPE to allow for reception of jumbo frames */
rctl |= E1000_RCTL_LPE; rctl |= E1000_RCTL_LPE;
/* disable queue 0 to prevent tail write w/o re-config */ /* disable queue 0 to prevent tail write w/o re-config */
...@@ -3522,8 +3539,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) ...@@ -3522,8 +3539,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
E1000_RCTL_DPF | /* Allow filtered pause */
E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
/* Do not mess with E1000_CTRL_VME, it affects transmit as well, /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
* and that breaks VLANs. * and that breaks VLANs.
...@@ -3539,12 +3555,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, ...@@ -3539,12 +3555,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 vmolr; u32 vmolr;
/* if it isn't the PF check to see if VFs are enabled and if (size > MAX_JUMBO_FRAME_SIZE)
* increase the size to support vlan tags size = MAX_JUMBO_FRAME_SIZE;
*/
if (vfn < adapter->vfs_allocated_count &&
adapter->vf_data[vfn].vlans_enabled)
size += VLAN_TAG_SIZE;
vmolr = rd32(E1000_VMOLR(vfn)); vmolr = rd32(E1000_VMOLR(vfn));
vmolr &= ~E1000_VMOLR_RLPML_MASK; vmolr &= ~E1000_VMOLR_RLPML_MASK;
...@@ -3554,32 +3566,6 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, ...@@ -3554,32 +3566,6 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
return 0; return 0;
} }
/**
* igb_rlpml_set - set maximum receive packet size
* @adapter: board private structure
*
* Configure maximum receivable packet size.
**/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
u32 max_frame_size = adapter->max_frame_size;
struct e1000_hw *hw = &adapter->hw;
u16 pf_id = adapter->vfs_allocated_count;
if (pf_id) {
igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
/* If we're in VMDQ or SR-IOV mode, then set global RLPML
* to our max jumbo frame size, in case we need to enable
* jumbo frames on one of the rings later.
* This will not pass over-length frames into the default
* queue because it's gated by the VMOLR.RLPML.
*/
max_frame_size = MAX_JUMBO_FRAME_SIZE;
}
wr32(E1000_RLPML, max_frame_size);
}
static inline void igb_set_vmolr(struct igb_adapter *adapter, static inline void igb_set_vmolr(struct igb_adapter *adapter,
int vfn, bool aupe) int vfn, bool aupe)
{ {
...@@ -3684,9 +3670,6 @@ static void igb_configure_rx(struct igb_adapter *adapter) ...@@ -3684,9 +3670,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
{ {
int i; int i;
/* set UTA to appropriate mode */
igb_set_uta(adapter);
/* set the correct pool for the PF default MAC address in entry 0 */ /* set the correct pool for the PF default MAC address in entry 0 */
igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
adapter->vfs_allocated_count); adapter->vfs_allocated_count);
...@@ -4004,6 +3987,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev) ...@@ -4004,6 +3987,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
return count; return count;
} }
/**
* igb_vlan_promisc_enable - attempt to put the port into VLAN promiscuous mode
* @adapter: board private structure
*
* Returns 0 when VLAN promiscuous mode could be enabled (all VFTA bits set
* and, with VFs present, the PF added to every active VLVF pool), or 1 when
* hardware VLAN filtering must stay on (NTUPLE priority filtering or
* SR-IOV pool filtering requires it) and the caller has to fall back.
**/
static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 i, pf_id;
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
case e1000_i350:
/* VLAN filtering needed for VLAN prio filter */
if (adapter->netdev->features & NETIF_F_NTUPLE)
break;
/* fall through */
case e1000_82576:
case e1000_82580:
case e1000_i354:
/* VLAN filtering needed for pool filtering */
if (adapter->vfs_allocated_count)
break;
/* fall through */
default:
return 1;
}
/* We are already in VLAN promisc, nothing to do */
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
return 0;
/* without VFs there are no VLVF pools to update */
if (!adapter->vfs_allocated_count)
goto set_vfta;
/* Add PF to all active pools; the loop walks indices
* E1000_VLVF_ARRAY_SIZE-1 down to 1, leaving entry 0 untouched
*/
pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
u32 vlvf = rd32(E1000_VLVF(i));
vlvf |= 1 << pf_id;
wr32(E1000_VLVF(i), vlvf);
}
set_vfta:
/* Set all bits in the VLAN filter table array */
for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
hw->mac.ops.write_vfta(hw, i, ~0U);
/* Set flag so we don't redo unnecessary work */
adapter->flags |= IGB_FLAG_VLAN_PROMISC;
return 0;
}
#define VFTA_BLOCK_SIZE 8
static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
{
struct e1000_hw *hw = &adapter->hw;
u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
u32 vid_start = vfta_offset * 32;
u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
u32 i, vid, word, bits, pf_id;
/* guarantee that we don't scrub out management VLAN */
vid = adapter->mng_vlan_id;
if (vid >= vid_start && vid < vid_end)
vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
if (!adapter->vfs_allocated_count)
goto set_vfta;
pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
u32 vlvf = rd32(E1000_VLVF(i));
/* pull VLAN ID from VLVF */
vid = vlvf & VLAN_VID_MASK;
/* only concern ourselves with a certain range */
if (vid < vid_start || vid >= vid_end)
continue;
if (vlvf & E1000_VLVF_VLANID_ENABLE) {
/* record VLAN ID in VFTA */
vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
continue;
}
/* remove PF from the pool */
bits = ~(1 << pf_id);
bits &= rd32(E1000_VLVF(i));
wr32(E1000_VLVF(i), bits);
}
set_vfta:
/* extract values from active_vlans and write back to VFTA */
for (i = VFTA_BLOCK_SIZE; i--;) {
vid = (vfta_offset + i) * 32;
word = vid / BITS_PER_LONG;
bits = vid % BITS_PER_LONG;
vfta[i] |= adapter->active_vlans[word] >> bits;
hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
}
}
static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
{
u32 i;
/* We are not in VLAN promisc, nothing to do */
if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
return;
/* Set flag so we don't redo unnecessary work */
adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
igb_scrub_vfta(adapter, i);
}
/** /**
* igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -4018,21 +4125,17 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4018,21 +4125,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count; unsigned int vfn = adapter->vfs_allocated_count;
u32 rctl, vmolr = 0; u32 rctl = 0, vmolr = 0;
int count; int count;
/* Check for Promiscuous and All Multicast modes */ /* Check for Promiscuous and All Multicast modes */
rctl = rd32(E1000_RCTL);
/* clear the effected bits */
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
if (netdev->flags & IFF_PROMISC) { if (netdev->flags & IFF_PROMISC) {
/* retain VLAN HW filtering if in VT mode */ rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
if (adapter->vfs_allocated_count) vmolr |= E1000_VMOLR_MPME;
rctl |= E1000_RCTL_VFE;
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* enable use of UTA filter to force packets to default pool */
vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); if (hw->mac.type == e1000_82576)
vmolr |= E1000_VMOLR_ROPE;
} else { } else {
if (netdev->flags & IFF_ALLMULTI) { if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE; rctl |= E1000_RCTL_MPE;
...@@ -4050,6 +4153,8 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4050,6 +4153,8 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= E1000_VMOLR_ROMPE; vmolr |= E1000_VMOLR_ROMPE;
} }
} }
}
/* Write addresses to available RAR registers, if there is not /* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable * sufficient space to store all the addresses then enable
* unicast promiscuous mode * unicast promiscuous mode
...@@ -4059,8 +4164,23 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4059,8 +4164,23 @@ static void igb_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_UPE; rctl |= E1000_RCTL_UPE;
vmolr |= E1000_VMOLR_ROPE; vmolr |= E1000_VMOLR_ROPE;
} }
/* enable VLAN filtering by default */
rctl |= E1000_RCTL_VFE; rctl |= E1000_RCTL_VFE;
/* disable VLAN filtering for modes that require it */
if ((netdev->flags & IFF_PROMISC) ||
(netdev->features & NETIF_F_RXALL)) {
/* if we fail to set all rules then just clear VFE */
if (igb_vlan_promisc_enable(adapter))
rctl &= ~E1000_RCTL_VFE;
} else {
igb_vlan_promisc_disable(adapter);
} }
/* update state of unicast, multicast, and VLAN filtering modes */
rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl); wr32(E1000_RCTL, rctl);
/* In order to support SR-IOV and eventually VMDq it is necessary to set /* In order to support SR-IOV and eventually VMDq it is necessary to set
...@@ -4071,9 +4191,19 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4071,9 +4191,19 @@ static void igb_set_rx_mode(struct net_device *netdev)
if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
return; return;
/* set UTA to appropriate mode */
igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
vmolr |= rd32(E1000_VMOLR(vfn)) & vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
/* enable Rx jumbo frames, no need for restriction */
vmolr &= ~E1000_VMOLR_RLPML_MASK;
vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
wr32(E1000_VMOLR(vfn), vmolr); wr32(E1000_VMOLR(vfn), vmolr);
wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
igb_restore_vf_multicasts(adapter); igb_restore_vf_multicasts(adapter);
} }
...@@ -5088,16 +5218,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, ...@@ -5088,16 +5218,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
if (test_bit(__IGB_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* The minimum packet size with TCTL.PSP set is 17 so pad the skb /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement. * in order to meet this minimum size requirement.
*/ */
...@@ -5792,125 +5912,132 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) ...@@ -5792,125 +5912,132 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 pool_mask, reg, vid; u32 pool_mask, vlvf_mask, i;
int i;
/* create mask for VF and other pools */
pool_mask = E1000_VLVF_POOLSEL_MASK;
vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); /* drop PF from pool bits */
pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
adapter->vfs_allocated_count));
/* Find the vlan filter for this id */ /* Find the vlan filter for this id */
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
reg = rd32(E1000_VLVF(i)); u32 vlvf = rd32(E1000_VLVF(i));
u32 vfta_mask, vid, vfta;
/* remove the vf from the pool */ /* remove the vf from the pool */
reg &= ~pool_mask; if (!(vlvf & vlvf_mask))
continue;
/* if pool is empty then remove entry from vfta */ /* clear out bit from VLVF */
if (!(reg & E1000_VLVF_POOLSEL_MASK) && vlvf ^= vlvf_mask;
(reg & E1000_VLVF_VLANID_ENABLE)) {
reg = 0;
vid = reg & E1000_VLVF_VLANID_MASK;
igb_vfta_set(hw, vid, false);
}
wr32(E1000_VLVF(i), reg); /* if other pools are present, just remove ourselves */
} if (vlvf & pool_mask)
goto update_vlvfb;
adapter->vf_data[vf].vlans_enabled = 0; /* if PF is present, leave VFTA */
} if (vlvf & E1000_VLVF_POOLSEL_MASK)
goto update_vlvf;
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) vid = vlvf & E1000_VLVF_VLANID_MASK;
{ vfta_mask = 1 << (vid % 32);
struct e1000_hw *hw = &adapter->hw;
u32 reg, i;
/* The vlvf table only exists on 82576 hardware and newer */ /* clear bit from VFTA */
if (hw->mac.type < e1000_82576) vfta = adapter->shadow_vfta[vid / 32];
return -1; if (vfta & vfta_mask)
hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
update_vlvf:
/* clear pool selection enable */
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
vlvf &= E1000_VLVF_POOLSEL_MASK;
else
vlvf = 0;
update_vlvfb:
/* clear pool bits */
wr32(E1000_VLVF(i), vlvf);
}
}
/* we only need to do this if VMDq is enabled */ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
if (!adapter->vfs_allocated_count) {
return -1; u32 vlvf;
int idx;
/* Find the vlan filter for this id */ /* short cut the special case */
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { if (vlan == 0)
reg = rd32(E1000_VLVF(i)); return 0;
if ((reg & E1000_VLVF_VLANID_ENABLE) &&
vid == (reg & E1000_VLVF_VLANID_MASK))
break;
}
if (add) { /* Search for the VLAN id in the VLVF entries */
if (i == E1000_VLVF_ARRAY_SIZE) { for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
/* Did not find a matching VLAN ID entry that was vlvf = rd32(E1000_VLVF(idx));
* enabled. Search for a free filter entry, i.e. if ((vlvf & VLAN_VID_MASK) == vlan)
* one without the enable bit set
*/
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
reg = rd32(E1000_VLVF(i));
if (!(reg & E1000_VLVF_VLANID_ENABLE))
break; break;
} }
}
if (i < E1000_VLVF_ARRAY_SIZE) {
/* Found an enabled/available entry */
reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
/* if !enabled we need to set this up in vfta */ return idx;
if (!(reg & E1000_VLVF_VLANID_ENABLE)) { }
/* add VID to filter table */
igb_vfta_set(hw, vid, true);
reg |= E1000_VLVF_VLANID_ENABLE;
}
reg &= ~E1000_VLVF_VLANID_MASK;
reg |= vid;
wr32(E1000_VLVF(i), reg);
/* do not modify RLPML for PF devices */ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
if (vf >= adapter->vfs_allocated_count) {
return 0; struct e1000_hw *hw = &adapter->hw;
u32 bits, pf_id;
int idx;
if (!adapter->vf_data[vf].vlans_enabled) { idx = igb_find_vlvf_entry(hw, vid);
u32 size; if (!idx)
return;
reg = rd32(E1000_VMOLR(vf)); /* See if any other pools are set for this VLAN filter
size = reg & E1000_VMOLR_RLPML_MASK; * entry other than the PF.
size += 4; */
reg &= ~E1000_VMOLR_RLPML_MASK; pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
reg |= size; bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
wr32(E1000_VMOLR(vf), reg); bits &= rd32(E1000_VLVF(idx));
/* Disable the filter so this falls into the default pool. */
if (!bits) {
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
wr32(E1000_VLVF(idx), 1 << pf_id);
else
wr32(E1000_VLVF(idx), 0);
} }
}
adapter->vf_data[vf].vlans_enabled++; static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
} bool add, u32 vf)
} else { {
if (i < E1000_VLVF_ARRAY_SIZE) { int pf_id = adapter->vfs_allocated_count;
/* remove vf from the pool */ struct e1000_hw *hw = &adapter->hw;
reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); int err;
/* if pool is empty then remove entry from vfta */
if (!(reg & E1000_VLVF_POOLSEL_MASK)) { /* If VLAN overlaps with one the PF is currently monitoring make
reg = 0; * sure that we are able to allocate a VLVF entry. This may be
igb_vfta_set(hw, vid, false); * redundant but it guarantees PF will maintain visibility to
* the VLAN.
*/
if (add && test_bit(vid, adapter->active_vlans)) {
err = igb_vfta_set(hw, vid, pf_id, true, false);
if (err)
return err;
} }
wr32(E1000_VLVF(i), reg);
/* do not modify RLPML for PF devices */ err = igb_vfta_set(hw, vid, vf, add, false);
if (vf >= adapter->vfs_allocated_count)
return 0;
adapter->vf_data[vf].vlans_enabled--; if (add && !err)
if (!adapter->vf_data[vf].vlans_enabled) { return err;
u32 size;
reg = rd32(E1000_VMOLR(vf)); /* If we failed to add the VF VLAN or we are removing the VF VLAN
size = reg & E1000_VMOLR_RLPML_MASK; * we may need to drop the PF pool bit in order to allow us to free
size -= 4; * up the VLVF resources.
reg &= ~E1000_VMOLR_RLPML_MASK; */
reg |= size; if (test_bit(vid, adapter->active_vlans) ||
wr32(E1000_VMOLR(vf), reg); (adapter->flags & IGB_FLAG_VLAN_PROMISC))
} igb_update_pf_vlvf(adapter, vid);
}
} return err;
return 0;
} }
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
...@@ -5923,20 +6050,23 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) ...@@ -5923,20 +6050,23 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
wr32(E1000_VMVIR(vf), 0); wr32(E1000_VMVIR(vf), 0);
} }
static int igb_ndo_set_vf_vlan(struct net_device *netdev, static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
int vf, u16 vlan, u8 qos) u16 vlan, u8 qos)
{ {
int err = 0; int err;
struct igb_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) err = igb_set_vf_vlan(adapter, vlan, true, vf);
return -EINVAL;
if (vlan || qos) {
err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
if (err) if (err)
goto out; return err;
igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
igb_set_vmolr(adapter, vf, !vlan); igb_set_vmolr(adapter, vf, !vlan);
/* revoke access to previous VLAN */
if (vlan != adapter->vf_data[vf].pf_vlan)
igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
false, vf);
adapter->vf_data[vf].pf_vlan = vlan; adapter->vf_data[vf].pf_vlan = vlan;
adapter->vf_data[vf].pf_qos = qos; adapter->vf_data[vf].pf_qos = qos;
dev_info(&adapter->pdev->dev, dev_info(&adapter->pdev->dev,
...@@ -5947,106 +6077,70 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, ...@@ -5947,106 +6077,70 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
dev_warn(&adapter->pdev->dev, dev_warn(&adapter->pdev->dev,
"Bring the PF device up before attempting to use the VF device.\n"); "Bring the PF device up before attempting to use the VF device.\n");
} }
} else {
igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
false, vf);
igb_set_vmvir(adapter, vlan, vf);
igb_set_vmolr(adapter, vf, true);
adapter->vf_data[vf].pf_vlan = 0;
adapter->vf_data[vf].pf_qos = 0;
}
out:
return err; return err;
} }
static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{ {
struct e1000_hw *hw = &adapter->hw; /* Restore tagless access via VLAN 0 */
int i; igb_set_vf_vlan(adapter, 0, true, vf);
u32 reg;
/* Find the vlan filter for this id */ igb_set_vmvir(adapter, 0, vf);
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { igb_set_vmolr(adapter, vf, true);
reg = rd32(E1000_VLVF(i));
if ((reg & E1000_VLVF_VLANID_ENABLE) &&
vid == (reg & E1000_VLVF_VLANID_MASK))
break;
}
if (i >= E1000_VLVF_ARRAY_SIZE) /* Remove any PF assigned VLAN */
i = -1; if (adapter->vf_data[vf].pf_vlan)
igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
false, vf);
adapter->vf_data[vf].pf_vlan = 0;
adapter->vf_data[vf].pf_qos = 0;
return i; return 0;
} }
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos)
{ {
struct e1000_hw *hw = &adapter->hw; struct igb_adapter *adapter = netdev_priv(netdev);
int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
int err = 0;
/* If in promiscuous mode we need to make sure the PF also has if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
* the VLAN filter set. return -EINVAL;
*/
if (add && (adapter->netdev->flags & IFF_PROMISC))
err = igb_vlvf_set(adapter, vid, add,
adapter->vfs_allocated_count);
if (err)
goto out;
err = igb_vlvf_set(adapter, vid, add, vf); return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
igb_disable_port_vlan(adapter, vf);
}
if (err) static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
goto out; {
int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
/* Go through all the checks to see if the VLAN filter should if (adapter->vf_data[vf].pf_vlan)
* be wiped completely. return -1;
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
int regndx = igb_find_vlvf_entry(adapter, vid);
if (regndx < 0) /* VLAN 0 is a special case, don't allow it to be removed */
goto out; if (!vid && !add)
/* See if any other pools are set for this VLAN filter return 0;
* entry other than the PF.
*/
vlvf = bits = rd32(E1000_VLVF(regndx));
bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
adapter->vfs_allocated_count);
/* If the filter was removed then ensure PF pool bit
* is cleared if the PF only added itself to the pool
* because the PF is in promiscuous mode.
*/
if ((vlvf & VLAN_VID_MASK) == vid &&
!test_bit(vid, adapter->active_vlans) &&
!bits)
igb_vlvf_set(adapter, vid, add,
adapter->vfs_allocated_count);
}
out: return igb_set_vf_vlan(adapter, vid, !!add, vf);
return err;
} }
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{ {
/* clear flags - except flag that indicates PF has set the MAC */ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
adapter->vf_data[vf].last_nack = jiffies;
/* reset offloads to defaults */ /* clear flags - except flag that indicates PF has set the MAC */
igb_set_vmolr(adapter, vf, true); vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
vf_data->last_nack = jiffies;
/* reset vlans for device */ /* reset vlans for device */
igb_clear_vf_vfta(adapter, vf); igb_clear_vf_vfta(adapter, vf);
if (adapter->vf_data[vf].pf_vlan) igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
igb_ndo_set_vf_vlan(adapter->netdev, vf, igb_set_vmvir(adapter, vf_data->pf_vlan |
adapter->vf_data[vf].pf_vlan, (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
adapter->vf_data[vf].pf_qos); igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
else
igb_clear_vf_vfta(adapter, vf);
/* reset multicast table array for vf */ /* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0; adapter->vf_data[vf].num_vf_mc_hashes = 0;
...@@ -6191,7 +6285,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) ...@@ -6191,7 +6285,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
"VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
vf); vf);
else else
retval = igb_set_vf_vlan(adapter, msgbuf, vf); retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
break; break;
default: default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
...@@ -6233,6 +6327,7 @@ static void igb_msg_task(struct igb_adapter *adapter) ...@@ -6233,6 +6327,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
/** /**
* igb_set_uta - Set unicast filter table address * igb_set_uta - Set unicast filter table address
* @adapter: board private structure * @adapter: board private structure
* @set: boolean indicating if we are setting or clearing bits
* *
* The unicast table address is a register array of 32-bit registers. * The unicast table address is a register array of 32-bit registers.
* The table is meant to be used in a way similar to how the MTA is used * The table is meant to be used in a way similar to how the MTA is used
...@@ -6240,21 +6335,18 @@ static void igb_msg_task(struct igb_adapter *adapter) ...@@ -6240,21 +6335,18 @@ static void igb_msg_task(struct igb_adapter *adapter)
* set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
* enable bit to allow vlan tag stripping when promiscuous mode is enabled * enable bit to allow vlan tag stripping when promiscuous mode is enabled
**/ **/
static void igb_set_uta(struct igb_adapter *adapter) static void igb_set_uta(struct igb_adapter *adapter, bool set)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 uta = set ? ~0 : 0;
int i; int i;
/* The UTA table only exists on 82576 hardware and newer */
if (hw->mac.type < e1000_82576)
return;
/* we only need to do this if VMDq is enabled */ /* we only need to do this if VMDq is enabled */
if (!adapter->vfs_allocated_count) if (!adapter->vfs_allocated_count)
return; return;
for (i = 0; i < hw->mac.uta_reg_count; i++) for (i = hw->mac.uta_reg_count; i--;)
array_wr32(E1000_UTA, i, ~0); array_wr32(E1000_UTA, i, uta);
} }
/** /**
...@@ -7201,8 +7293,6 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) ...@@ -7201,8 +7293,6 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
ctrl &= ~E1000_CTRL_VME; ctrl &= ~E1000_CTRL_VME;
wr32(E1000_CTRL, ctrl); wr32(E1000_CTRL, ctrl);
} }
igb_rlpml_set(adapter);
} }
static int igb_vlan_rx_add_vid(struct net_device *netdev, static int igb_vlan_rx_add_vid(struct net_device *netdev,
...@@ -7212,11 +7302,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, ...@@ -7212,11 +7302,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count; int pf_id = adapter->vfs_allocated_count;
/* attempt to add filter to vlvf array */
igb_vlvf_set(adapter, vid, true, pf_id);
/* add the filter since PF can receive vlans w/o entry in vlvf */ /* add the filter since PF can receive vlans w/o entry in vlvf */
igb_vfta_set(hw, vid, true); if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
igb_vfta_set(hw, vid, pf_id, true, !!vid);
set_bit(vid, adapter->active_vlans); set_bit(vid, adapter->active_vlans);
...@@ -7227,16 +7315,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, ...@@ -7227,16 +7315,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid) __be16 proto, u16 vid)
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count; int pf_id = adapter->vfs_allocated_count;
s32 err; struct e1000_hw *hw = &adapter->hw;
/* remove vlan from VLVF table array */
err = igb_vlvf_set(adapter, vid, false, pf_id);
/* if vid was not present in VLVF just remove it from table */ /* remove VID from filter table */
if (err) if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
igb_vfta_set(hw, vid, false); igb_vfta_set(hw, vid, pf_id, false, true);
clear_bit(vid, adapter->active_vlans); clear_bit(vid, adapter->active_vlans);
...@@ -7245,11 +7329,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, ...@@ -7245,11 +7329,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev,
static void igb_restore_vlan(struct igb_adapter *adapter) static void igb_restore_vlan(struct igb_adapter *adapter)
{ {
u16 vid; u16 vid = 1;
igb_vlan_mode(adapter->netdev, adapter->netdev->features); igb_vlan_mode(adapter->netdev, adapter->netdev->features);
igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
...@@ -7704,15 +7789,14 @@ static void igb_io_resume(struct pci_dev *pdev) ...@@ -7704,15 +7789,14 @@ static void igb_io_resume(struct pci_dev *pdev)
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
u8 qsel) u8 qsel)
{ {
u32 rar_low, rar_high;
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
/* HW expects these in little endian so we reverse the byte order /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian * from network order (big endian) to CPU endian
*/ */
rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | rar_low = le32_to_cpup((__be32 *)(addr));
((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = le16_to_cpup((__be16 *)(addr + 4));
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
/* Indicate to hardware the Address is Valid. */ /* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV; rar_high |= E1000_RAH_AV;
...@@ -7959,8 +8043,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) ...@@ -7959,8 +8043,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* than the Rx threshold. Set hwm to PBA - max frame * than the Rx threshold. Set hwm to PBA - max frame
* size in 16B units, capping it at PBA - 6KB. * size in 16B units, capping it at PBA - 6KB.
*/ */
hwm = 64 * pba - adapter->max_frame_size / 16;
if (hwm < 64 * (pba - 6))
hwm = 64 * (pba - 6); hwm = 64 * (pba - 6);
reg = rd32(E1000_FCRTC); reg = rd32(E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg &= ~E1000_FCRTC_RTH_COAL_MASK;
...@@ -7971,8 +8053,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) ...@@ -7971,8 +8053,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Set the DMA Coalescing Rx threshold to PBA - 2 * max /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
* frame size, capping it at PBA - 10KB. * frame size, capping it at PBA - 10KB.
*/ */
dmac_thr = pba - adapter->max_frame_size / 512;
if (dmac_thr < pba - 10)
dmac_thr = pba - 10; dmac_thr = pba - 10;
reg = rd32(E1000_DMACR); reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK; reg &= ~E1000_DMACR_DMACTHR_MASK;
......
...@@ -234,13 +234,19 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) ...@@ -234,13 +234,19 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
{ {
s32 ret_val = -E1000_ERR_MBX; s32 ret_val = -E1000_ERR_MBX;
int count = 10;
do {
/* Take ownership of the buffer */ /* Take ownership of the buffer */
ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
/* reserve mailbox for VF use */ /* reserve mailbox for VF use */
if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) {
ret_val = E1000_SUCCESS; ret_val = 0;
break;
}
udelay(1000);
} while (count-- > 0);
return ret_val; return ret_val;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment