Commit fe0acb5f authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-09-24

This series contains updates to i40e and i40evf only.

Harshitha removes the ability to set or advertise 100 Mbps on X722
devices, since that speed is not supported, so we should not be able
to advertise or set the NIC to it.

Alan fixes an issue where deleting a MAC filter did not actually delete
the filter in question, because the wrong cmd_flag was being passed to
the firmware.
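
For reference, a minimal sketch of the corrected delete path (the
element type and flag names are taken from the diff below; the snippet
itself is illustrative, not a quote of the patch):

	struct i40e_aqc_remove_macvlan_element_data element;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* a remove command must carry DEL_* flags; passing
	 * I40E_AQC_MACVLAN_ADD_IGNORE_VLAN here meant the filter was
	 * never actually deleted */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);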

Preethi adds the encapsulation checksum offload negotiation flag, so that
we can control it.
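
A hedged sketch of how a VF might consume the new flag once negotiated
(the hw_enc_features wiring is illustrative and not part of this
series):

	/* only advertise encapsulated-checksum offload if the PF
	 * granted the capability during resource negotiation */
	if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;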

Jake cleans up the use of ATR auto_disable_flags, since some code paths
accidentally performed a "full" disable by clearing the flag in the
standard flags field, which forces ATR off permanently instead of
disabling it temporarily.  He then updates the checks so that ATR stays
disabled while TCP/IPv4 sideband rules are in effect.  Lastly, he adds
support to the i40evf driver for setting interrupt moderation values
per queue, as i40e already does.
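
The two flag fields behave quite differently, as this sketch (using the
identifiers from the diff below) illustrates:

	/* clearing the standard flag is the "full" disable; ATR stays
	 * off until an administrator explicitly re-enables it */
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;

	/* setting the auto_disable flag merely suspends ATR; the driver
	 * can lift it again by itself, e.g. once the last TCP/IPv4
	 * sideband rule is removed */
	pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	...
	pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;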

Henry cleans up unreachable code, since the error check on the
i40e_shutdown_adminq() return value can never trigger.

Mitch enables support for adaptive interrupt throttling, since all the
code for it is already in the interrupt handler.  He then fixes a rare
case where we might get a VSI with no queues and try to configure RSS
anyway, which would result in a divide by zero.
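
The guard amounts to two lines in i40e_pf_config_rss(), visible in the
diff below: if the VSI still has no queues after the fallback sizing,
bail out before rss_size is used as a divisor:

	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;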

Alex fixes an issue where the transmit cleanup flow incorrectly assumed
it could check for the flow director bits after it had unmapped the
buffer.  He then adds a txring_txq() helper to convert an i40e_ring/
i40evf_ring to a netdev_tx_queue structure, like ixgbe and fm10k, which
avoids a multi-line function call in all the areas that need access to
it.  He also refactors the Flow Director filter configuration out into
a separate function, as was done for the standard xmit path, and
removes the debugfs hook for programming Flow Director filters, since
it was meant for debug only.
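
The helper and a representative call-site conversion, both taken from
the diff below:

	static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
	{
		return netdev_get_tx_queue(ring->netdev, ring->queue_index);
	}

	/* before */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
	/* after */
	netdev_tx_reset_queue(txring_txq(tx_ring));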
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 21445c91 65e87c03
@@ -74,7 +74,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
@@ -710,8 +710,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
......
@@ -1430,84 +1430,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
kfree(desc);
desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_filter fd_data;
u16 packet_len, i, j = 0;
char *asc_packet;
u8 *raw_packet;
bool add = false;
int ret;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
goto command_write_done;
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
goto command_write_done;
asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
GFP_KERNEL);
if (!asc_packet)
goto command_write_done;
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
GFP_KERNEL);
if (!raw_packet) {
kfree(asc_packet);
asc_packet = NULL;
goto command_write_done;
}
cnt = sscanf(&cmd_buf[13],
"%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
&fd_data.flex_off, &fd_data.pctype,
&fd_data.dest_vsi, &fd_data.dest_ctl,
&fd_data.fd_status, &fd_data.cnt_index,
&fd_data.fd_id, &packet_len, asc_packet);
if (cnt != 10) {
dev_info(&pf->pdev->dev,
"program fd_filter: bad command string, cnt=%d\n",
cnt);
kfree(asc_packet);
asc_packet = NULL;
kfree(raw_packet);
goto command_write_done;
}
/* fix packet length if user entered 0 */
if (packet_len == 0)
packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
/* make sure to check the max as well */
packet_len = min_t(u16,
packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
for (i = 0; i < packet_len; i++) {
cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
if (!cnt)
break;
j += 3;
}
dev_info(&pf->pdev->dev, "FD raw packet dump\n");
print_hex_dump(KERN_INFO, "FD raw packet: ",
DUMP_PREFIX_OFFSET, 16, 1,
raw_packet, packet_len, true);
ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
} else {
dev_info(&pf->pdev->dev,
"Filter command send failed %d\n", ret);
}
kfree(raw_packet);
raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
@@ -1732,8 +1654,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " globr\n");
dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
......
@@ -1970,11 +1970,22 @@ static int i40e_set_phys_id(struct net_device *netdev,
* 125us (8000 interrupts per second) == ITR(62)
*/
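/* worked example: the ITR registers tick in 2us units, so a 125us
 * interval programs as 125 / 2 = 62, and 1s / 125us = 8000 interrupts
 * per second -- hence ITR(62) above
 */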
/**
* __i40e_get_coalesce - get per-queue coalesce settings
* @netdev: the netdev to check
* @ec: ethtool coalesce data structure
* @queue: which queue to pick
*
* Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
* are per queue. If queue is <0 then we default to queue 0 as the
* representative value.
**/
static int __i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_ring *rx_ring, *tx_ring;
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
@@ -1989,14 +2000,18 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return -EINVAL;
}
if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting))
rx_ring = vsi->rx_rings[queue];
tx_ring = vsi->tx_rings[queue];
if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting))
if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
@@ -2010,18 +2025,44 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return 0;
}
/**
* i40e_get_coalesce - get a netdev's coalesce settings
* @netdev: the netdev to check
* @ec: ethtool coalesce data structure
*
* Gets the coalesce settings for a particular netdev. Note that if user has
* modified per-queue settings, this only guarantees to represent queue 0. See
* __i40e_get_coalesce for more details.
**/
static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, -1);
}
/**
* i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
* @netdev: netdev structure
* @ec: ethtool's coalesce settings
* @queue: the particular queue to read
*
* Will read a specific queue's coalesce settings
**/
static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, queue);
}
/**
* i40e_set_itr_per_queue - set ITR values for specific queue
* @vsi: the VSI to set values for
* @ec: coalesce settings from ethtool
* @queue: the queue to modify
*
* Change the ITR settings for a specific queue.
**/
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
@@ -2060,6 +2101,14 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
i40e_flush(hw);
}
/**
* __i40e_set_coalesce - set coalesce settings for particular queue
* @netdev: the netdev to change
* @ec: ethtool coalesce settings
* @queue: the queue to change
*
* Sets the coalesce settings for a particular queue.
**/
static int __i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
@@ -2120,12 +2169,27 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return 0;
}
/**
* i40e_set_coalesce - set coalesce settings for every queue on the netdev
* @netdev: the netdev to change
* @ec: ethtool coalesce settings
*
* This will set each queue to the same coalesce settings.
**/
static int i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_set_coalesce(netdev, ec, -1);
}
/**
* i40e_set_per_queue_coalesce - set specific queue's coalesce settings
* @netdev: the netdev to change
* @ec: ethtool's coalesce settings
* @queue: the queue to change
*
* Sets the specified queue's coalesce settings.
**/
static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
@@ -3021,6 +3085,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush current ATR settings */
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
......
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 12
#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1315,7 +1315,7 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
/* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
@@ -1908,7 +1908,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
if (f->vlan == I40E_VLAN_ANY) {
del_list[num_del].vlan_tag = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
} else {
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan));
@@ -5242,7 +5242,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
@@ -5939,13 +5939,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
/* Wait for some more space to be available to turn on ATR */
/* Wait for some more space to be available to turn on ATR. We also
* must check that no existing ntuple rules for TCP are in effect
*/
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->fd_tcp_rule == 0)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -5976,9 +5980,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int fd_room;
int reg;
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
@@ -5998,7 +5999,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6018,7 +6019,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6052,9 +6053,6 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
@@ -8284,6 +8282,8 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
if (!vsi->rss_size)
return -EINVAL;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -8608,7 +8608,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
I40E_FLAG_100M_SGMII_CAPABLE |
I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
@@ -8683,13 +8682,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
}
return need_reset;
}
@@ -11336,11 +11335,7 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown the adminq */
ret_code = i40e_shutdown_adminq(hw);
if (ret_code)
dev_warn(&pdev->dev,
"Failed to destroy the Admin Queue resources: %d\n",
ret_code);
i40e_shutdown_adminq(hw);
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
......
@@ -40,6 +40,69 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_fdir - Generate a Flow Director descriptor based on fdata
* @tx_ring: Tx ring to send buffer on
* @fdata: Flow director filter data
* @add: Indicate if we are adding a rule or deleting one
*
**/
static void i40e_fdir(struct i40e_ring *tx_ring,
struct i40e_fdir_filter *fdata, bool add)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
u32 flex_ptype, dtype_cmd;
u16 i;
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
(fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
(fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
(fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
/* Use LAN VSI Id if not programmed by user */
flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
dtype_cmd |= add ?
I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT :
I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT;
dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
(fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
(fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
if (fdata->cnt_index) {
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
((u32)fdata->cnt_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
}
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
#define I40E_FD_CLEAN_DELAY 10
/**
* i40e_program_fdir_filter - Program a Flow Director filter
@@ -48,14 +111,13 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* @pf: The PF pointer
* @add: True for add/update, False for remove
**/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add)
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
u8 *raw_packet, struct i40e_pf *pf,
bool add)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf, *first;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -92,56 +154,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
first = &tx_ring->tx_bi[i];
memset(first, 0, sizeof(struct i40e_tx_buffer));
tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK;
fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
I40E_TXD_FLTR_QW0_PCTYPE_MASK;
/* Use LAN VSI Id if not programmed by user */
if (fdir_data->dest_vsi == 0)
fpt |= (pf->vsi[pf->lan_vsi]->id) <<
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
else
fpt |= ((u32)fdir_data->dest_vsi <<
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add)
dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT;
else
dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT;
dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
I40E_TXD_FLTR_QW1_DEST_MASK;
dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
if (fdir_data->cnt_index != 0) {
dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
dcc |= ((u32)fdir_data->cnt_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
fdir_desc->rsvd = cpu_to_le32(0);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
i40e_fdir(tx_ring, fdir_data, add);
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
@@ -282,18 +296,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
@@ -532,7 +546,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
dev_kfree_skb_any(tx_buffer->skb);
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -545,9 +562,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -584,8 +598,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index));
netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -754,8 +767,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
/* notify netdev of completed buffers */
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1864,6 +1877,15 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
{
return vsi->rx_rings[idx]->rx_itr_setting;
}
static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
{
return vsi->tx_rings[idx]->tx_itr_setting;
}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1879,6 +1901,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1887,18 +1910,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
rx_itr_setting = get_rx_itr_enabled(vsi, idx);
tx_itr_setting = get_tx_itr_enabled(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
!ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
!ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -2784,9 +2810,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
first->bytecount);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2811,13 +2835,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
!netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index))) {
!netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
!netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)) &&
!netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
......
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
/**
* txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */
@@ -163,6 +163,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
......
@@ -51,7 +51,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
dev_kfree_skb_any(tx_buffer->skb);
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -64,9 +67,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index));
netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
/* notify netdev of completed buffers */
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1312,6 +1311,19 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
return adapter->rx_rings[idx].rx_itr_setting;
}
static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
return adapter->tx_rings[idx].tx_itr_setting;
}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1326,6 +1338,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1334,18 +1348,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
rx_itr_setting = get_rx_itr_enabled(vsi, idx);
tx_itr_setting = get_tx_itr_enabled(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
!ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
!ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -2012,9 +2029,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
first->bytecount);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2039,13 +2054,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
!netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index))) {
!netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
!netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)) &&
!netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
......
@@ -287,6 +287,14 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
/* high bit set means dynamic, use accessor routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
@@ -445,4 +453,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
/**
* txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */
@@ -160,6 +160,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
......
@@ -59,13 +59,6 @@ struct i40e_vsi {
unsigned long state;
int base_vector;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 qs_handle;
};
......
@@ -296,92 +296,206 @@ static int i40evf_set_ringparam(struct net_device *netdev,
}
/**
* i40evf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
* __i40evf_get_coalesce - get per-queue coalesce settings
* @netdev: the netdev to check
* @ec: ethtool coalesce data structure
* @queue: which queue to pick
*
* Returns current coalescing settings. This is referred to elsewhere in the
* driver as Interrupt Throttle Rate, as this is how the hardware describes
* this functionality.
* Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
* are per queue. If queue is <0 then we default to queue 0 as the
* representative value.
**/
static int i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
static int __i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_ring *rx_ring, *tx_ring;
ec->tx_max_coalesced_frames = vsi->work_limit;
ec->rx_max_coalesced_frames = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
/* Rx and Tx usecs are per-queue values. If the user doesn't specify
* a queue, return queue 0's values as representative.
*/
if (queue < 0)
queue = 0;
else if (queue >= adapter->num_active_queues)
return -EINVAL;
rx_ring = &adapter->rx_rings[queue];
tx_ring = &adapter->tx_rings[queue];
if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
/**
* i40evf_set_coalesce - Set interrupt coalescing settings
* i40evf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
*
* Change current coalescing settings.
* Returns current coalescing settings. This is referred to elsewhere in the
* driver as Interrupt Throttle Rate, as this is how the hardware describes
* this functionality. Note that if per-queue settings have been modified this
* only represents the settings of queue 0.
**/
static int i40evf_set_coalesce(struct net_device *netdev,
static int i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
return __i40evf_get_coalesce(netdev, ec, -1);
}
/**
* i40evf_get_per_queue_coalesce - get coalesce values for specific queue
* @netdev: netdev to read
* @ec: coalesce settings from ethtool
* @queue: the queue to read
*
* Read specific queue's coalesce settings.
**/
static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
u32 queue,
struct ethtool_coalesce *ec)
{
return __i40evf_get_coalesce(netdev, ec, queue);
}
/**
* i40evf_set_itr_per_queue - set ITR values for specific queue
* @adapter: the VF adapter to set values for
* @ec: coalesce settings from ethtool
* @queue: the queue to modify
*
* Change the ITR settings for a specific queue.
**/
static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
u16 vector;
adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
if (ec->use_adaptive_rx_coalesce)
adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
else
adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
else
adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
q_vector = adapter->rx_rings[queue].q_vector;
q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
q_vector = adapter->tx_rings[queue].q_vector;
q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
i40e_flush(hw);
}
/**
* __i40evf_set_coalesce - set coalesce settings for particular queue
* @netdev: the netdev to change
* @ec: ethtool coalesce settings
* @queue: the queue to change
*
* Sets the coalesce settings for a particular queue.
**/
static int __i40evf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
(ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
else
if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
} else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
(ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
}
if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
(ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
else if (ec->use_adaptive_tx_coalesce)
vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
else
if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
} else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
(ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
if (ec->use_adaptive_rx_coalesce)
vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
else
vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
else
vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
q_vector = &adapter->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
i40e_flush(hw);
/* Rx and Tx usecs are per-queue values. If the user doesn't specify
* a queue, apply the settings to all queues.
*/
if (queue < 0) {
for (i = 0; i < adapter->num_active_queues; i++)
i40evf_set_itr_per_queue(adapter, ec, i);
} else if (queue < adapter->num_active_queues) {
i40evf_set_itr_per_queue(adapter, ec, queue);
} else {
netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
adapter->num_active_queues - 1);
return -EINVAL;
}
return 0;
}
/**
* i40evf_set_coalesce - Set interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
*
* Change current coalescing settings for every queue.
**/
static int i40evf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40evf_set_coalesce(netdev, ec, -1);
}
/**
* i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
* @netdev: the netdev to change
* @ec: ethtool's coalesce settings
* @queue: the queue to modify
*
* Modifies a specific queue's coalesce settings.
*/
static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
u32 queue,
struct ethtool_coalesce *ec)
{
return __i40evf_set_coalesce(netdev, ec, queue);
}
/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
@@ -533,6 +647,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
.get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
.set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
......
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 12
#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -370,6 +370,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
struct i40e_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
@@ -377,7 +378,10 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
q_vector->ring_mask |= BIT(r_idx);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}
/**
@@ -391,6 +395,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
struct i40e_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
@@ -398,9 +403,10 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
q_vector->ring_mask |= BIT(t_idx);
wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}
/**
@@ -1154,6 +1160,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1162,6 +1169,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
return 0;
@@ -2269,10 +2277,6 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
......