Commit c8fdc324 authored by David S. Miller's avatar David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-10-19

This series contains updates to i40e and i40evf only.

Kiran adds a spinlock around code accessing VSI MAC filter list to
ensure that we are synchronizing access to the filter list, otherwise
we can end up with multiple accesses at the same time which can cause
the VSI MAC filter list to get in an unstable or corrupted state.

Jesse fixes overlong BIT defines, where the RSS enabling call was
mistakenly missed.  Also fixes a bug where the enable function was
enabling the interrupt twice while trying to update the two interrupt
throttle rate thresholds for Rx and Tx, while refactoring the IRQ
enable function to simplify reading the flow.  Also addressed the high
CPU utilization seen with some small streaming workloads, which the
driver should now reduce.

Anjali fixes two X722 issues with respect to EEPROM checksum verify and
reading NVM version info.  Fixed where a mask value was accidentally
replaced with a bit mask causing Flow Director sideband to be broken.

Alex Duyck fixes areas of the drivers which run from hard interrupt
context or with interrupts already disabled in netpoll, so use
napi_schedule_irqoff() instead of napi_schedule().

Mitch fixes the VF drivers to not easily give up when it is not able
to communicate with the PF driver.

Carolyn fixes a problem where our tools' MAC loopback test would fail
after driver unbind, because the hardware was configured for multiqueue
and the unbind operation did not clear this configuration.  Also fixed
an issue where the NVMUpdate tool gets bad data from the PHY when using
the PHY NVM feature, because of contention on the MDIO interface from
PHY capability calls issued by the driver during regular operations.

Catherine fixed an issue where we were checking if autoneg was allowed
to change before checking if autoneg was changing, these checks need to
be in the reverse order.

Jean Sacren fixes up a function header comment to align the kernel-doc
with the actual code.

v2: Cleaned up the use of spin_is_locked() in patch 1 based on feedback
    from David Miller, since it always evaluates to zero on uni-processor
    builds
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 26440c83 a1f192cf
...@@ -468,6 +468,8 @@ struct i40e_vsi { ...@@ -468,6 +468,8 @@ struct i40e_vsi {
#define I40E_VSI_FLAG_VEB_OWNER BIT(1) #define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags; unsigned long flags;
/* Per VSI lock to protect elements/list (MAC filter) */
spinlock_t mac_filter_list_lock;
struct list_head mac_filter_list; struct list_head mac_filter_list;
/* VSI stats */ /* VSI stats */
...@@ -575,6 +577,8 @@ struct i40e_q_vector { ...@@ -575,6 +577,8 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN]; char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state; bool arm_wb_state;
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 should adjust ITR */
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
/* lan device */ /* lan device */
......
...@@ -958,6 +958,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) ...@@ -958,6 +958,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
else else
hw->pf_id = (u8)(func_rid & 0x7); hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
status = i40e_init_nvm(hw); status = i40e_init_nvm(hw);
return status; return status;
} }
...@@ -2275,13 +2278,15 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) ...@@ -2275,13 +2278,15 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
if (status) if (status)
return status; return status;
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
NULL); status = i40e_aq_get_phy_capabilities(hw, false, false,
if (status) &abilities, NULL);
return status; if (status)
return status;
memcpy(hw->phy.link_info.module_type, &abilities.module_type, memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type)); sizeof(hw->phy.link_info.module_type));
}
return status; return status;
} }
......
...@@ -661,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev, ...@@ -661,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev,
/* Check autoneg */ /* Check autoneg */
if (autoneg == AUTONEG_ENABLE) { if (autoneg == AUTONEG_ENABLE) {
/* If autoneg is not supported, return error */
if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
netdev_info(netdev, "Autoneg not supported on this phy\n");
return -EINVAL;
}
/* If autoneg was not already enabled */ /* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
/* If autoneg is not supported, return error */
if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
netdev_info(netdev, "Autoneg not supported on this phy\n");
return -EINVAL;
}
/* Autoneg is allowed to change */
config.abilities = abilities.abilities | config.abilities = abilities.abilities |
I40E_AQ_PHY_ENABLE_AN; I40E_AQ_PHY_ENABLE_AN;
change = true; change = true;
} }
} else { } else {
/* If autoneg is supported 10GBASE_T is the only phy that
* can disable it, so otherwise return error
*/
if (safe_ecmd.supported & SUPPORTED_Autoneg &&
hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
return -EINVAL;
}
/* If autoneg is currently enabled */ /* If autoneg is currently enabled */
if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
/* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
if (safe_ecmd.supported & SUPPORTED_Autoneg &&
hw->phy.link_info.phy_type !=
I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
return -EINVAL;
}
/* Autoneg is allowed to change */
config.abilities = abilities.abilities & config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN; ~I40E_AQ_PHY_ENABLE_AN;
change = true; change = true;
...@@ -748,9 +751,9 @@ static int i40e_set_settings(struct net_device *netdev, ...@@ -748,9 +751,9 @@ static int i40e_set_settings(struct net_device *netdev,
status = i40e_update_link_info(hw); status = i40e_update_link_info(hw);
if (status) if (status)
netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n", netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
i40e_stat_str(hw, status), i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_aq_str(hw, hw->aq.asq_last_status));
} else { } else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
......
...@@ -1516,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) ...@@ -1516,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function. * same PCI function.
*/ */
netdev->dev_port = 1; netdev->dev_port = 1;
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
/* use san mac */ /* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
......
This diff is collapsed.
...@@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, ...@@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data) u16 *data)
{ {
if (hw->mac.type == I40E_MAC_X722) enum i40e_status_code ret_code = 0;
return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data); if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
i40e_release_nvm(hw);
}
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
return ret_code;
} }
/** /**
...@@ -397,9 +406,19 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, ...@@ -397,9 +406,19 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data) u16 *words, u16 *data)
{ {
if (hw->mac.type == I40E_MAC_X722) enum i40e_status_code ret_code = 0;
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
return i40e_read_nvm_buffer_srctl(hw, offset, words, data); if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
data);
i40e_release_nvm(hw);
}
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
return ret_code;
} }
/** /**
......
...@@ -815,6 +815,8 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) ...@@ -815,6 +815,8 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* i40e_set_new_dynamic_itr - Find new ITR level * i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data * @rc: structure containing ring performance data
* *
* Returns true if ITR changed, false if not
*
* Stores a new ITR value based on packets and byte counts during * Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation * the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic * is faster updates and more accurate ITR for the current traffic
...@@ -823,21 +825,32 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) ...@@ -823,21 +825,32 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* testing data as well as attempting to minimize response time * testing data as well as attempting to minimize response time
* while increasing bulk throughput. * while increasing bulk throughput.
**/ **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{ {
enum i40e_latency_range new_latency_range = rc->latency_range; enum i40e_latency_range new_latency_range = rc->latency_range;
struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr; u32 new_itr = rc->itr;
int bytes_per_int; int bytes_per_int;
int usecs;
if (rc->total_packets == 0 || !rc->itr) if (rc->total_packets == 0 || !rc->itr)
return; return false;
/* simple throttlerate management /* simple throttlerate management
* 0-10MB/s lowest (100000 ints/s) * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s) * 10-20MB/s low (20000 ints/s)
* 20-1249MB/s bulk (8000 ints/s) * 20-1249MB/s bulk (18000 ints/s)
* > 40000 Rx packets per second (8000 ints/s)
*
* The math works out because the divisor is in 10^(-6) which
* turns the bytes/us input value into MB/s values, but
* make sure to use usecs, as the register values written
* are in 2 usec increments in the ITR registers, and make sure
* to use the smoothed values that the countdown timer gives us.
*/ */
bytes_per_int = rc->total_bytes / rc->itr; usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
bytes_per_int = rc->total_bytes / usecs;
switch (new_latency_range) { switch (new_latency_range) {
case I40E_LOWEST_LATENCY: case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10) if (bytes_per_int > 10)
...@@ -850,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -850,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
new_latency_range = I40E_LOWEST_LATENCY; new_latency_range = I40E_LOWEST_LATENCY;
break; break;
case I40E_BULK_LATENCY: case I40E_BULK_LATENCY:
if (bytes_per_int <= 20) case I40E_ULTRA_LATENCY:
new_latency_range = I40E_LOW_LATENCY;
break;
default: default:
if (bytes_per_int <= 20) if (bytes_per_int <= 20)
new_latency_range = I40E_LOW_LATENCY; new_latency_range = I40E_LOW_LATENCY;
break; break;
} }
/* this is to adjust RX more aggressively when streaming small
* packets. The value of 40000 was picked as it is just beyond
* what the hardware can receive per second if in low latency
* mode.
*/
#define RX_ULTRA_PACKET_RATE 40000
if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
(&qv->rx == rc))
new_latency_range = I40E_ULTRA_LATENCY;
rc->latency_range = new_latency_range; rc->latency_range = new_latency_range;
switch (new_latency_range) { switch (new_latency_range) {
case I40E_LOWEST_LATENCY: case I40E_LOWEST_LATENCY:
new_itr = I40E_ITR_100K; new_itr = I40E_ITR_50K;
break; break;
case I40E_LOW_LATENCY: case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K; new_itr = I40E_ITR_20K;
break; break;
case I40E_BULK_LATENCY: case I40E_BULK_LATENCY:
new_itr = I40E_ITR_18K;
break;
case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K; new_itr = I40E_ITR_8K;
break; break;
default: default:
break; break;
} }
if (new_itr != rc->itr)
rc->itr = new_itr;
rc->total_bytes = 0; rc->total_bytes = 0;
rc->total_packets = 0; rc->total_packets = 0;
if (new_itr != rc->itr) {
rc->itr = new_itr;
return true;
}
return false;
} }
/** /**
...@@ -1747,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1747,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
return total_rx_packets; return total_rx_packets;
} }
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
return val;
}
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
/** /**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about * @vsi: the VSI we care about
...@@ -1757,54 +1802,69 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1757,54 +1802,69 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector) struct i40e_q_vector *q_vector)
{ {
struct i40e_hw *hw = &vsi->back->hw; struct i40e_hw *hw = &vsi->back->hw;
u16 old_itr; bool rx = false, tx = false;
u32 rxval, txval;
int vector; int vector;
u32 val;
vector = (q_vector->v_idx + vsi->base_vector); vector = (q_vector->v_idx + vsi->base_vector);
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
!ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
goto enable_int;
}
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
old_itr = q_vector->rx.itr; rx = i40e_set_new_dynamic_itr(&q_vector->rx);
i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
if (old_itr != q_vector->rx.itr) {
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_RX_ITR <<
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(q_vector->rx.itr <<
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
} else {
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE <<
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
}
if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
} else {
i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
} }
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
old_itr = q_vector->tx.itr; tx = i40e_set_new_dynamic_itr(&q_vector->tx);
i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
if (old_itr != q_vector->tx.itr) {
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_TX_ITR <<
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(q_vector->tx.itr <<
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
} else {
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE <<
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
}
if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
vsi->base_vector - 1), val);
} else {
i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
} }
if (rx || tx) {
/* get the higher of the two ITR adjustments and
* use the same value for both ITR registers
* when in adaptive mode (Rx and/or Tx)
*/
u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
q_vector->tx.itr = q_vector->rx.itr = itr;
txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
tx = true;
rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
rx = true;
}
/* only need to enable the interrupt once, but need
* to possibly update both ITR values
*/
if (rx) {
/* set the INTENA_MSK_MASK so that this first write
* won't actually enable the interrupt, instead just
* updating the ITR (it's bit 31 PF and VF)
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
wr32(hw, INTREG(vector - 1), rxval);
}
enable_int:
if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
else
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} }
/** /**
......
...@@ -32,12 +32,14 @@ ...@@ -32,12 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005 #define I40E_ITR_100K 0x0005
#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019 #define I40E_ITR_20K 0x0019
#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E #define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A #define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF I40E_ITR_8K #define I40E_ITR_RX_DEF I40E_ITR_20K
#define I40E_ITR_TX_DEF I40E_ITR_4K #define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
...@@ -296,6 +298,7 @@ enum i40e_latency_range { ...@@ -296,6 +298,7 @@ enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0, I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1, I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2, I40E_BULK_LATENCY = 2,
I40E_ULTRA_LATENCY = 3,
}; };
struct i40e_ring_container { struct i40e_ring_container {
......
...@@ -544,6 +544,9 @@ struct i40e_hw { ...@@ -544,6 +544,9 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
/* debug mask */ /* debug mask */
u32 debug_mask; u32 debug_mask;
char err_str[16]; char err_str[16];
...@@ -1065,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status { ...@@ -1065,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status {
}; };
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
......
...@@ -547,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) ...@@ -547,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
*/ */
if (vf->port_vlan_id) if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id); i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr, f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1, vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false); true, false);
...@@ -559,6 +561,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) ...@@ -559,6 +561,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (!f) if (!f)
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n"); "Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_list_lock);
} }
/* program mac filter */ /* program mac filter */
...@@ -1598,6 +1601,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1598,6 +1601,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
} }
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
/* Lock once, because all function inside for loop accesses VSI's
* MAC filter list which needs to be protected using same lock.
*/
spin_lock_bh(&vsi->mac_filter_list_lock);
/* add new addresses to the list */ /* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) { for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
...@@ -1616,9 +1624,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1616,9 +1624,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n"); "Unable to add VF MAC filter\n");
ret = I40E_ERR_PARAM; ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param; goto error_param;
} }
} }
spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */ /* program the updated filter list */
if (i40e_sync_vsi_filters(vsi, false)) if (i40e_sync_vsi_filters(vsi, false))
...@@ -1666,10 +1676,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1666,10 +1676,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
} }
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete addresses from the list */ /* delete addresses from the list */
for (i = 0; i < al->num_elements; i++) for (i = 0; i < al->num_elements; i++)
i40e_del_filter(vsi, al->list[i].addr, i40e_del_filter(vsi, al->list[i].addr,
I40E_VLAN_ANY, true, false); I40E_VLAN_ANY, true, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */ /* program the updated filter list */
if (i40e_sync_vsi_filters(vsi, false)) if (i40e_sync_vsi_filters(vsi, false))
...@@ -2066,6 +2078,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2066,6 +2078,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param; goto error_param;
} }
/* Lock once because below invoked function add/del_filter requires
* mac_filter_list_lock to be held
*/
spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete the temporary mac address */ /* delete the temporary mac address */
i40e_del_filter(vsi, vf->default_lan_addr.addr, i40e_del_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1, vf->port_vlan_id ? vf->port_vlan_id : -1,
...@@ -2077,6 +2094,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2077,6 +2094,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
list_for_each_entry(f, &vsi->mac_filter_list, list) list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */ /* program mac filter */
if (i40e_sync_vsi_filters(vsi, false)) { if (i40e_sync_vsi_filters(vsi, false)) {
...@@ -2109,6 +2128,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, ...@@ -2109,6 +2128,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back; struct i40e_pf *pf = np->vsi->back;
bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
struct i40e_vf *vf; struct i40e_vf *vf;
int ret = 0; int ret = 0;
...@@ -2138,7 +2158,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, ...@@ -2138,7 +2158,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
/* duplicate request, so just return success */ /* duplicate request, so just return success */
goto error_pvid; goto error_pvid;
if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) { spin_lock_bh(&vsi->mac_filter_list_lock);
is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock);
if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id); vf_id);
......
...@@ -443,9 +443,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, ...@@ -443,9 +443,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status; return status;
...@@ -520,8 +517,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, ...@@ -520,8 +517,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
......
...@@ -318,6 +318,8 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector ...@@ -318,6 +318,8 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector
* i40e_set_new_dynamic_itr - Find new ITR level * i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data * @rc: structure containing ring performance data
* *
* Returns true if ITR changed, false if not
*
* Stores a new ITR value based on packets and byte counts during * Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation * the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic * is faster updates and more accurate ITR for the current traffic
...@@ -326,21 +328,32 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector ...@@ -326,21 +328,32 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector
* testing data as well as attempting to minimize response time * testing data as well as attempting to minimize response time
* while increasing bulk throughput. * while increasing bulk throughput.
**/ **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{ {
enum i40e_latency_range new_latency_range = rc->latency_range; enum i40e_latency_range new_latency_range = rc->latency_range;
struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr; u32 new_itr = rc->itr;
int bytes_per_int; int bytes_per_int;
int usecs;
if (rc->total_packets == 0 || !rc->itr) if (rc->total_packets == 0 || !rc->itr)
return; return false;
/* simple throttlerate management /* simple throttlerate management
* 0-10MB/s lowest (100000 ints/s) * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s) * 10-20MB/s low (20000 ints/s)
* 20-1249MB/s bulk (8000 ints/s) * 20-1249MB/s bulk (18000 ints/s)
* > 40000 Rx packets per second (8000 ints/s)
*
* The math works out because the divisor is in 10^(-6) which
* turns the bytes/us input value into MB/s values, but
* make sure to use usecs, as the register values written
* are in 2 usec increments in the ITR registers, and make sure
* to use the smoothed values that the countdown timer gives us.
*/ */
bytes_per_int = rc->total_bytes / rc->itr; usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
bytes_per_int = rc->total_bytes / usecs;
switch (new_latency_range) { switch (new_latency_range) {
case I40E_LOWEST_LATENCY: case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10) if (bytes_per_int > 10)
...@@ -353,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -353,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
new_latency_range = I40E_LOWEST_LATENCY; new_latency_range = I40E_LOWEST_LATENCY;
break; break;
case I40E_BULK_LATENCY: case I40E_BULK_LATENCY:
if (bytes_per_int <= 20) case I40E_ULTRA_LATENCY:
new_latency_range = I40E_LOW_LATENCY;
break;
default: default:
if (bytes_per_int <= 20) if (bytes_per_int <= 20)
new_latency_range = I40E_LOW_LATENCY; new_latency_range = I40E_LOW_LATENCY;
break; break;
} }
/* this is to adjust RX more aggressively when streaming small
* packets. The value of 40000 was picked as it is just beyond
* what the hardware can receive per second if in low latency
* mode.
*/
#define RX_ULTRA_PACKET_RATE 40000
if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
(&qv->rx == rc))
new_latency_range = I40E_ULTRA_LATENCY;
rc->latency_range = new_latency_range; rc->latency_range = new_latency_range;
switch (new_latency_range) { switch (new_latency_range) {
case I40E_LOWEST_LATENCY: case I40E_LOWEST_LATENCY:
new_itr = I40E_ITR_100K; new_itr = I40E_ITR_50K;
break; break;
case I40E_LOW_LATENCY: case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K; new_itr = I40E_ITR_20K;
break; break;
case I40E_BULK_LATENCY: case I40E_BULK_LATENCY:
new_itr = I40E_ITR_18K;
break;
case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K; new_itr = I40E_ITR_8K;
break; break;
default: default:
break; break;
} }
if (new_itr != rc->itr)
rc->itr = new_itr;
rc->total_bytes = 0; rc->total_bytes = 0;
rc->total_packets = 0; rc->total_packets = 0;
if (new_itr != rc->itr) {
rc->itr = new_itr;
return true;
}
return false;
} }
/* /*
...@@ -1187,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1187,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
return total_rx_packets; return total_rx_packets;
} }
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
u32 val;
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
(type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
(itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
return val;
}
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
/** /**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about * @vsi: the VSI we care about
...@@ -1197,55 +1242,67 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1197,55 +1242,67 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector) struct i40e_q_vector *q_vector)
{ {
struct i40e_hw *hw = &vsi->back->hw; struct i40e_hw *hw = &vsi->back->hw;
u16 old_itr; bool rx = false, tx = false;
u32 rxval, txval;
int vector; int vector;
u32 val;
vector = (q_vector->v_idx + vsi->base_vector); vector = (q_vector->v_idx + vsi->base_vector);
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
!ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
goto enable_int;
}
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
old_itr = q_vector->rx.itr; rx = i40e_set_new_dynamic_itr(&q_vector->rx);
i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
if (old_itr != q_vector->rx.itr) {
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
(I40E_RX_ITR <<
I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
(q_vector->rx.itr <<
I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
} else {
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
(I40E_ITR_NONE <<
I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
}
if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
} else {
i40evf_irq_enable_queues(vsi->back, 1
<< q_vector->v_idx);
} }
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
old_itr = q_vector->tx.itr; tx = i40e_set_new_dynamic_itr(&q_vector->tx);
i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
if (old_itr != q_vector->tx.itr) { }
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | if (rx || tx) {
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | /* get the higher of the two ITR adjustments and
(I40E_TX_ITR << * use the same value for both ITR registers
I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | * when in adaptive mode (Rx and/or Tx)
(q_vector->tx.itr << */
I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
} else { q_vector->tx.itr = q_vector->rx.itr = itr;
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | tx = true;
(I40E_ITR_NONE << rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); rx = true;
}
if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
} else {
i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
} }
/* only need to enable the interrupt once, but need
* to possibly update both ITR values
*/
if (rx) {
/* set the INTENA_MSK_MASK so that this first write
* won't actually enable the interrupt, instead just
* updating the ITR (it's bit 31 PF and VF)
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
wr32(hw, INTREG(vector - 1), rxval);
}
enable_int:
if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
else
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} }
/** /**
......
...@@ -32,12 +32,14 @@ ...@@ -32,12 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005 #define I40E_ITR_100K 0x0005
#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019 #define I40E_ITR_20K 0x0019
#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E #define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A #define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF I40E_ITR_8K #define I40E_ITR_RX_DEF I40E_ITR_20K
#define I40E_ITR_TX_DEF I40E_ITR_4K #define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
...@@ -89,16 +91,16 @@ enum i40e_dyn_idx_t { ...@@ -89,16 +91,16 @@ enum i40e_dyn_idx_t {
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \ #define i40e_pf_get_default_rss_hena(pf) \
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_512 512 /* Used for packet split */
...@@ -291,6 +293,7 @@ enum i40e_latency_range { ...@@ -291,6 +293,7 @@ enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0, I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1, I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2, I40E_BULK_LATENCY = 2,
I40E_ULTRA_LATENCY = 3,
}; };
struct i40e_ring_container { struct i40e_ring_container {
......
...@@ -1055,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status { ...@@ -1055,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status {
}; };
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
......
...@@ -112,6 +112,8 @@ struct i40e_q_vector { ...@@ -112,6 +112,8 @@ struct i40e_q_vector {
struct i40e_ring_container tx; struct i40e_ring_container tx;
u32 ring_mask; u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */ u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
int v_idx; /* vector index in list */ int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9]; char name[IFNAMSIZ + 9];
bool arm_wb_state; bool arm_wb_state;
......
...@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; ...@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] = static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver"; "Intel(R) XL710/X710 Virtual Function Network Driver";
#define DRV_VERSION "1.3.21" #define DRV_VERSION "1.3.25"
const char i40evf_driver_version[] = DRV_VERSION; const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] = static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2015 Intel Corporation."; "Copyright (c) 2013 - 2015 Intel Corporation.";
...@@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) ...@@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring) if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED; return IRQ_HANDLED;
napi_schedule(&q_vector->napi); napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) ...@@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring; q_vector->rx.ring = rx_ring;
q_vector->rx.count++; q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY; q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} }
/** /**
...@@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) ...@@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring; q_vector->tx.ring = tx_ring;
q_vector->tx.count++; q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY; q_vector->tx.latency_range = I40E_LOW_LATENCY;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++; q_vector->num_ringpairs++;
q_vector->ring_mask |= BIT(t_idx); q_vector->ring_mask |= BIT(t_idx);
} }
...@@ -2357,9 +2359,12 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2357,9 +2359,12 @@ static void i40evf_init_task(struct work_struct *work)
err: err:
/* Things went into the weeds, so try again later */ /* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n"); dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */ i40evf_shutdown_adminq(hw);
adapter->state = __I40EVF_STARTUP;
schedule_delayed_work(&adapter->init_task, HZ * 5);
return;
} }
schedule_delayed_work(&adapter->init_task, HZ); schedule_delayed_work(&adapter->init_task, HZ);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment