Commit 8ed9b5e1 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-05-28

This series contains updates to ethtool, ixgbe, i40e and i40evf.

John adds helper routines for ethtool to pass a VF index along with the
queue in rx_flow_spec.  Since the ring_cookie is 64 bits wide, which is
much larger than what is needed for actual queue index values, the helper
routines pack a VF index into the upper bits of the cookie.  John then
provides an ixgbe patch to allow the flow director to use the entire
queue space.
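
As a rough illustration of how the new cookie layout is meant to be used
(a minimal sketch, not part of the series: pack_ring_cookie(),
cookie_to_queue() and their queue-count parameters are hypothetical, while
the masks and the two decode helpers are the ones added to the ethtool
UAPI header at the end of this diff):

#include <linux/errno.h>
#include <linux/ethtool.h>

/* Hypothetical helper: pack a VF id and a ring index into a ring_cookie. */
static u64 pack_ring_cookie(u32 ring, u8 vf)
{
    return ((u64)vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) |
           (ring & ETHTOOL_RX_FLOW_SPEC_RING);
}

/* Hypothetical helper: decode a cookie the way the ixgbe hunk below does. */
static int cookie_to_queue(u64 ring_cookie, u32 num_rx_queues,
                           u32 num_rx_queues_per_pool)
{
    u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
    u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);

    if (!vf)    /* vf == 0: ring indexes the PF's own Rx queues */
        return ring < num_rx_queues ? ring : -EINVAL;
    /* vf != 0: ring indexes that VF's pool of queues */
    return ring < num_rx_queues_per_pool ?
           (vf - 1) * num_rx_queues_per_pool + ring : -EINVAL;
}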

Neerav provides an i40e patch to collect XOFF Rx stats in a case where
they were not being collected before.
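
A minimal sketch of the resulting check, mirroring the
i40e_update_prio_xoff_rx() hunk further down (the wrapper name here is
illustrative only; the i40e types are the driver's own):

/* Illustrative wrapper; the real change is inside i40e_update_prio_xoff_rx(). */
static void sketch_update_xoff_rx(struct i40e_pf *pf,
                                  struct i40e_dcbx_config *dcb_cfg)
{
    /* Collect Link XOFF stats when PFC is disabled */
    if (!dcb_cfg->pfc.pfcenable) {
        i40e_update_link_xoff_rx(pf);
        return;
    }
    /* otherwise fall through to the per-priority (PFC) XOFF counters */
}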

Anjali provides ATR support for tunneled packets, as well as stats to
count tunnel ATR hits.  She also cleans up PF struct members that are
unnecessary, since the stat index macro can be used directly, and moves
flow director ATR/SB messages to a higher debug level, since they are not
useful unless silicon validation is happening.
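
Condensed from the i40e_atr() hunk below, the tunnel-aware counting amounts
to picking a different per-PF statistics index when the frame was flagged as
VXLAN-tunneled (the helper name here is illustrative, not from the patch):

/* Illustrative condensation of the counter-index selection in i40e_atr(). */
static u32 sketch_atr_cnt_index(struct i40e_pf *pf, u32 tx_flags)
{
    u16 idx = (tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL) ?
              I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) :
              I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);

    /* the index is written into the filter programming descriptor */
    return ((u32)idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
           I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}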

Greg provides a patch to disable offline diagnostics if VFs are enabled,
since ethtool offline diagnostic tests are not designed (it is out of
scope) to disable VF functions for testing and re-enable them afterward.
He also cleans up a TODO comment that is no longer needed.
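
The guard itself is small; a condensed sketch of its two sides is shown
here (the full versions, which also fail each individual sub-test, are in
the i40e_diag_test() and i40e_pci_sriov_enable() hunks below):

/* In the ethtool offline test path: refuse to run while VFs are active. */
if (i40e_active_vfs(pf)) {
    eth_test->flags |= ETH_TEST_FL_FAILED;
    clear_bit(__I40E_TESTING, &pf->state);
    goto skip_ol_tests;
}

/* In the SR-IOV enable path: refuse while diagnostics are running. */
if (pf->state & __I40E_TESTING) {
    err = -EPERM;
    goto err_out;
}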

Vasu provides a fix for an FCoE EOF case where i40e_fcoe_ctxt_eof() may be
called before i40e_fcoe_eof_is_supported() is called.
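
In other words, the conversion routine now assumes its caller has already
validated the EOF; a hypothetical call site showing the required ordering
(the surrounding code is not from this series):

/* Hypothetical caller: validate first, convert second. */
if (i40e_fcoe_eof_is_supported(eof)) {
    u32 ctxt_eof = i40e_fcoe_ctxt_eof(eof);
    /* ... program the FCoE context descriptor using ctxt_eof ... */
} else {
    /* unsupported EOF: drop the frame instead of hitting the
     * WARN_ON()/-EINVAL default case added below
     */
}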

Jesse adds skb->xmit_more support to i40evf.  He then provides a
performance enhancement for i40evf by inlining some functions, which
yields a 15% gain in small packet performance.  He also removes the use of
the time_stamp field, since it is no longer used to determine whether
there is a tx_hang and was part of a previous tx_hang design that is no
longer used.
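
The xmit_more change boils down to deferring the tail (doorbell) write
while the stack indicates more frames are queued, unless the queue has
just been stopped; a condensed sketch of the pattern from the
i40evf_tx_map() hunk below (the wrapper name is illustrative):

/* Illustrative wrapper around the tail-bump logic added to i40evf_tx_map(). */
static void sketch_notify_hw(struct i40e_ring *tx_ring, struct sk_buff *skb,
                             u32 i)
{
    tx_ring->next_to_use = i;

    i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);

    /* Only ring the doorbell when no more frames are coming, or when the
     * queue was just stopped; otherwise batch the descriptor writes.
     */
    if (!skb->xmit_more ||
        netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                               tx_ring->queue_index)))
        writel(i, tx_ring->tail);
}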
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1ea23a21 f029094e
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
 enum i40e_fd_stat_idx {
     I40E_FD_STAT_ATR,
     I40E_FD_STAT_SB,
+    I40E_FD_STAT_ATR_TUNNEL,
     I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
     (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id) \
     (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+    (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
 struct i40e_fdir_filter {
     struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
     struct hlist_head fdir_filter_list;
     u16 fdir_pf_active_filters;
-    u16 fd_sb_cnt_idx;
-    u16 fd_atr_cnt_idx;
     unsigned long fd_flush_timestamp;
     u32 fd_flush_cnt;
     u32 fd_add_err;
...
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
     I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
     I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
     I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+    I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
     I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
     /* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
     return *data;
 }
 
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+    struct i40e_vf *vfs = pf->vf;
+    int i;
+
+    for (i = 0; i < pf->num_alloc_vfs; i++)
+        if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+            return true;
+    return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
                            struct ethtool_test *eth_test, u64 *data)
 {
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
         netif_info(pf, drv, netdev, "offline testing starting\n");
         set_bit(__I40E_TESTING, &pf->state);
 
+        if (i40e_active_vfs(pf)) {
+            dev_warn(&pf->pdev->dev,
+                     "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+            data[I40E_ETH_TEST_REG] = 1;
+            data[I40E_ETH_TEST_EEPROM] = 1;
+            data[I40E_ETH_TEST_INTR] = 1;
+            data[I40E_ETH_TEST_LOOPBACK] = 1;
+            data[I40E_ETH_TEST_LINK] = 1;
+            eth_test->flags |= ETH_TEST_FL_FAILED;
+            clear_bit(__I40E_TESTING, &pf->state);
+            goto skip_ol_tests;
+        }
+
         /* If the device is online then take it offline */
         if (if_running)
             /* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
             data[I40E_ETH_TEST_LOOPBACK] = 0;
     }
 
+skip_ol_tests:
+
     netif_info(pf, drv, netdev, "testing finished\n");
 }
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
     input->pctype = 0;
     input->dest_vsi = vsi->id;
     input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-    input->cnt_index = pf->fd_sb_cnt_idx;
+    input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
     input->flow_type = fsp->flow_type;
     input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
...
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first and that already checks for all supported valid eof values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
     case FC_EOF_A:
         return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
     default:
-        /* FIXME: still returns 0 */
-        pr_err("Unrecognized EOF %x\n", eof);
-        return 0;
+        /* Supported valid eof shall be already checked by
+         * calling i40e_fcoe_eof_is_supported() first,
+         * therefore this default case shall never hit.
+         */
+        WARN_ON(1);
+        return -EINVAL;
     }
 }
...
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
          __stringify(DRV_VERSION_MINOR) "." \
          __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
     dcb_cfg = &hw->local_dcbx_config;
 
-    /* See if DCB enabled with PFC TC */
-    if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-        !(dcb_cfg->pfc.pfcenable)) {
+    /* Collect Link XOFF stats when PFC is disabled */
+    if (!dcb_cfg->pfc.pfcenable) {
         i40e_update_link_xoff_rx(pf);
         return;
     }
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                        &osd->rx_jabber, &nsd->rx_jabber);
 
     /* FDIR stats */
-    i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+    i40e_stat_update32(hw,
+                       I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
                        pf->stat_offsets_loaded,
                        &osd->fd_atr_match, &nsd->fd_atr_match);
-    i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+    i40e_stat_update32(hw,
+                       I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
                        pf->stat_offsets_loaded,
                        &osd->fd_sb_match, &nsd->fd_sb_match);
+    i40e_stat_update32(hw,
+                  I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+                  pf->stat_offsets_loaded,
+                  &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
     val = rd32(hw, I40E_PRTPM_EEE_STAT);
     nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
         pf->fd_add_err = pf->fd_atr_cnt = 0;
         if (pf->fd_tcp_rule > 0) {
             pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-            dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+            if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
             pf->fd_tcp_rule = 0;
         }
         i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
         if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
             (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
             pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-            dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+            if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
         }
     }
     /* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
         if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
             (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
             pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-            dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+            if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
         }
     }
 }
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
     if (!(time_after(jiffies, min_flush_time)) &&
         (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-        dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+            dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
         disable_atr = true;
     }
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
         if (!disable_atr)
             pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
         clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+            dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
     }
 }
 }
@@ -7676,12 +7686,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
         (pf->hw.func_caps.fd_filters_best_effort > 0)) {
         pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
         pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-        /* Setup a counter for fd_atr per PF */
-        pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
         if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
             pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-            /* Setup a counter for fd_sb per PF */
-            pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
         } else {
             dev_info(&pf->pdev->dev,
                      "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7771,7 +7777,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
         pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
         pf->fdir_pf_active_filters = 0;
         pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-        dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+            dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
         /* if ATR was auto disabled it can be re-enabled. */
         if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
             (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
...
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
     tx_desc->cmd_type_offset_bsz =
         build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-    /* set the timestamp */
-    tx_buf->time_stamp = jiffies;
-
     /* Force memory writes to complete before letting h/w
      * know there are new descriptors to fetch.
      */
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
     if (add) {
         pf->fd_tcp_rule++;
         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-            dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+            if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
             pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
         }
     } else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                       (pf->fd_tcp_rule - 1) : 0;
         if (pf->fd_tcp_rule == 0) {
             pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-            dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+            if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
         }
     }
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
         if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
             !(pf->auto_disable_flags &
               I40E_FLAG_FD_SB_ENABLED)) {
-            dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+            if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
             pf->auto_disable_flags |=
                         I40E_FLAG_FD_SB_ENABLED;
         }
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
              tx_ring->vsi->seid,
              tx_ring->queue_index,
              tx_ring->next_to_use, i);
-        dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-             "  time_stamp           <%lx>\n"
-             "  jiffies              <%lx>\n",
-             tx_ring->tx_bi[i].time_stamp, jiffies);
 
         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
         /* ERR_MASK will only have valid bits if EOP set */
         if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
             dev_kfree_skb_any(skb);
-            /* TODO: shouldn't we increment a counter indicating the
-             * drop?
-             */
             continue;
         }
@@ -1923,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                     u32 flags, __be16 protocol)
+                     u32 tx_flags, __be16 protocol)
 {
     struct i40e_filter_program_desc *fdir_desc;
     struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1952,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
     if (!tx_ring->atr_sample_rate)
         return;
 
-    /* snag network header to get L4 type and address */
-    hdr.network = skb_network_header(skb);
+    if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+        return;
 
-    /* Currently only IPv4/IPv6 with TCP is supported */
-    if (protocol == htons(ETH_P_IP)) {
-        if (hdr.ipv4->protocol != IPPROTO_TCP)
-            return;
+    if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+        /* snag network header to get L4 type and address */
+        hdr.network = skb_network_header(skb);
 
-        /* access ihl as a u8 to avoid unaligned access on ia64 */
-        hlen = (hdr.network[0] & 0x0F) << 2;
-    } else if (protocol == htons(ETH_P_IPV6)) {
-        if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+        /* Currently only IPv4/IPv6 with TCP is supported
+         * access ihl as u8 to avoid unaligned access on ia64
+         */
+        if (tx_flags & I40E_TX_FLAGS_IPV4)
+            hlen = (hdr.network[0] & 0x0F) << 2;
+        else if (protocol == htons(ETH_P_IPV6))
+            hlen = sizeof(struct ipv6hdr);
+        else
             return;
-
-        hlen = sizeof(struct ipv6hdr);
     } else {
-        return;
+        hdr.network = skb_inner_network_header(skb);
+        hlen = skb_inner_network_header_len(skb);
     }
 
+    /* Currently only IPv4/IPv6 with TCP is supported
+     * Note: tx_flags gets modified to reflect inner protocols in
+     * tx_enable_csum function if encap is enabled.
+     */
+    if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+        (hdr.ipv4->protocol != IPPROTO_TCP))
+        return;
+    else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+             (hdr.ipv6->nexthdr != IPPROTO_TCP))
+        return;
+
     th = (struct tcphdr *)(hdr.network + hlen);
 
     /* Due to lack of space, no more new filters can be programmed */
@@ -2020,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
               I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
     dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-    dtype_cmd |=
-        ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+    if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+        dtype_cmd |=
+            ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+            I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+            I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+    else
+        dtype_cmd |=
+            ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+            I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+            I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
     fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
     fdir_desc->rsvd = cpu_to_le32(0);
@@ -2043,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                               struct i40e_ring *tx_ring,
-                               u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                       struct i40e_ring *tx_ring,
                                       u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                              struct i40e_ring *tx_ring,
                                              u32 *flags)
 #endif
 {
     __be16 protocol = skb->protocol;
@@ -2117,16 +2130,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                    u32 *cd_tunneling)
 {
     u32 cd_cmd, cd_tso_len, cd_mss;
     struct ipv6hdr *ipv6h;
@@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                 u32 *td_cmd, u32 *td_offset,
                                 struct i40e_ring *tx_ring,
                                 u32 *cd_tunneling)
@@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         switch (ip_hdr(skb)->protocol) {
         case IPPROTO_UDP:
             l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+            *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
             break;
         default:
             return;
@@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         this_ipv6_hdr = inner_ipv6_hdr(skb);
         this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-        if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-            if (tx_flags & I40E_TX_FLAGS_TSO) {
+        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+            if (*tx_flags & I40E_TX_FLAGS_TSO) {
                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                 ip_hdr(skb)->check = 0;
             } else {
                 *cd_tunneling |=
                     I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
             }
-        } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
             *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-            if (tx_flags & I40E_TX_FLAGS_TSO)
+            if (*tx_flags & I40E_TX_FLAGS_TSO)
                 ip_hdr(skb)->check = 0;
         }
@@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                           skb_transport_offset(skb)) >> 1) <<
                        I40E_TXD_CTX_QW0_NATLEN_SHIFT;
         if (this_ip_hdr->version == 6) {
-            tx_flags &= ~I40E_TX_FLAGS_IPV4;
-            tx_flags |= I40E_TX_FLAGS_IPV6;
+            *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+            *tx_flags |= I40E_TX_FLAGS_IPV6;
         }
     } else {
         network_hdr_len = skb_network_header_len(skb);
@@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
     }
 
     /* Enable IP checksum offloads */
-    if (tx_flags & I40E_TX_FLAGS_IPV4) {
+    if (*tx_flags & I40E_TX_FLAGS_IPV4) {
         l4_hdr = this_ip_hdr->protocol;
         /* the stack computes the IP header already, the only time we
          * need the hardware to recompute it is in the case of TSO.
         */
-        if (tx_flags & I40E_TX_FLAGS_TSO) {
+        if (*tx_flags & I40E_TX_FLAGS_TSO) {
             *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
             this_ip_hdr->check = 0;
         } else {
@@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         /* Now set the td_offset for IP header length */
         *td_offset = (network_hdr_len >> 2) <<
                       I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-    } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+    } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
         l4_hdr = this_ipv6_hdr->nexthdr;
         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
         /* Now set the td_offset for IP header length */
@@ -2394,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * Returns 0 if stop is not needed
  **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
     if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2476,13 +2487,13 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                 struct i40e_tx_buffer *first, u32 tx_flags,
-                 const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                         struct i40e_tx_buffer *first, u32 tx_flags,
                         const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                struct i40e_tx_buffer *first, u32 tx_flags,
                                const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
     unsigned int data_len = skb->data_len;
@@ -2588,9 +2599,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                          tx_ring->queue_index),
                  first->bytecount);
 
-    /* set the timestamp */
-    first->time_stamp = jiffies;
-
     /* Force memory writes to complete before letting h/w
      * know there are new descriptors to fetch.  (Only
      * applicable for weak-ordered memory model archs,
@@ -2643,11 +2651,11 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                               struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                       struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                              struct i40e_ring *tx_ring)
 #endif
 {
     unsigned int f;
@@ -2709,7 +2717,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     else if (protocol == htons(ETH_P_IPV6))
         tx_flags |= I40E_TX_FLAGS_IPV6;
 
-    tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+    tso = i40e_tso(tx_ring, skb, &hdr_len,
                    &cd_type_cmd_tso_mss, &cd_tunneling);
 
     if (tso < 0)
@@ -2735,7 +2743,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     if (skb->ip_summed == CHECKSUM_PARTIAL) {
         tx_flags |= I40E_TX_FLAGS_CSUM;
 
-        i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+        i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                             tx_ring, &cd_tunneling);
     }
...
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FSO           (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN          (u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB         (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL  (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK     0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
 struct i40e_tx_buffer {
     struct i40e_tx_desc *next_to_watch;
-    unsigned long time_stamp;
     union {
         struct sk_buff *skb;
         void *raw_buf;
...
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
     /* flow director stats */
     u64 fd_atr_match;
     u64 fd_sb_match;
+    u64 fd_atr_tunnel_match;
     /* EEE LPI */
     u32 tx_lpi_status;
     u32 rx_lpi_status;
...
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
     int pre_existing_vfs = pci_num_vf(pdev);
     int err = 0;
 
+    if (pf->state & __I40E_TESTING) {
+        dev_warn(&pdev->dev,
+             "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+        err = -EPERM;
+        goto err_out;
+    }
+
     dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
     if (pre_existing_vfs && pre_existing_vfs != num_vfs)
         i40e_free_vfs(pf);
...
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
              tx_ring->vsi->seid,
              tx_ring->queue_index,
              tx_ring->next_to_use, i);
-        dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-             "  time_stamp           <%lx>\n"
-             "  jiffies              <%lx>\n",
-             tx_ring->tx_bi[i].time_stamp, jiffies);
 
         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
         /* ERR_MASK will only have valid bits if EOP set */
         if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
             dev_kfree_skb_any(skb);
-            /* TODO: shouldn't we increment a counter indicating the
-             * drop?
-             */
             continue;
         }
@@ -1350,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:      send buffer
  * @tx_ring:  ring to send buffer on
  * @flags:    the tx flags to be set
@@ -1361,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
  * Returns error code indicate the frame should be dropped upon error and the
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring,
-                                      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
                                                struct i40e_ring *tx_ring,
                                                u32 *flags)
 {
     __be16 protocol = skb->protocol;
     u32 tx_flags = 0;
@@ -1406,16 +1399,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                    u32 *cd_tunneling)
 {
     u32 cd_cmd, cd_tso_len, cd_mss;
     struct ipv6hdr *ipv6h;
@@ -1466,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                 u32 *td_cmd, u32 *td_offset,
                                 struct i40e_ring *tx_ring,
                                 u32 *cd_tunneling)
@@ -1487,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         switch (ip_hdr(skb)->protocol) {
         case IPPROTO_UDP:
             l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+            *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
             break;
         default:
             return;
@@ -1496,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         this_ipv6_hdr = inner_ipv6_hdr(skb);
         this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-        if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-            if (tx_flags & I40E_TX_FLAGS_TSO) {
+        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+            if (*tx_flags & I40E_TX_FLAGS_TSO) {
                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                 ip_hdr(skb)->check = 0;
             } else {
                 *cd_tunneling |=
                     I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
             }
-        } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
             *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-            if (tx_flags & I40E_TX_FLAGS_TSO)
+            if (*tx_flags & I40E_TX_FLAGS_TSO)
                 ip_hdr(skb)->check = 0;
         }
@@ -1519,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                           skb_transport_offset(skb)) >> 1) <<
                        I40E_TXD_CTX_QW0_NATLEN_SHIFT;
         if (this_ip_hdr->version == 6) {
-            tx_flags &= ~I40E_TX_FLAGS_IPV4;
-            tx_flags |= I40E_TX_FLAGS_IPV6;
+            *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+            *tx_flags |= I40E_TX_FLAGS_IPV6;
         }
@@ -1532,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
     }
 
     /* Enable IP checksum offloads */
-    if (tx_flags & I40E_TX_FLAGS_IPV4) {
+    if (*tx_flags & I40E_TX_FLAGS_IPV4) {
         l4_hdr = this_ip_hdr->protocol;
         /* the stack computes the IP header already, the only time we
          * need the hardware to recompute it is in the case of TSO.
         */
-        if (tx_flags & I40E_TX_FLAGS_TSO) {
+        if (*tx_flags & I40E_TX_FLAGS_TSO) {
             *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
             this_ip_hdr->check = 0;
         } else {
@@ -1546,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         /* Now set the td_offset for IP header length */
         *td_offset = (network_hdr_len >> 2) <<
                       I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-    } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+    } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
         l4_hdr = this_ipv6_hdr->nexthdr;
         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
         /* Now set the td_offset for IP header length */
@@ -1675,7 +1666,44 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+    netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+    /* Memory barrier before checking head and tail */
+    smp_mb();
+
+    /* Check again in a case another CPU has just made room available. */
+    if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+        return -EBUSY;
+
+    /* A reprieve! - use start_queue because it doesn't call schedule */
+    netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+    ++tx_ring->tx_stats.restart_queue;
+    return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+    if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+        return 0;
+    return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
  * @skb:      send buffer
  * @first:    first buffer info buffer to use
@@ -1684,9 +1712,9 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                        struct i40e_tx_buffer *first, u32 tx_flags,
-                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                  struct i40e_tx_buffer *first, u32 tx_flags,
                                  const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
     unsigned int data_len = skb->data_len;
     unsigned int size = skb_headlen(skb);
@@ -1792,9 +1820,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                          tx_ring->queue_index),
                  first->bytecount);
 
-    /* set the timestamp */
-    first->time_stamp = jiffies;
-
     /* Force memory writes to complete before letting h/w
      * know there are new descriptors to fetch.  (Only
      * applicable for weak-ordered memory model archs,
@@ -1811,8 +1836,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     tx_ring->next_to_use = i;
 
+    i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
     /* notify HW of packet */
-    writel(i, tx_ring->tail);
+    if (!skb->xmit_more ||
+        netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                               tx_ring->queue_index)))
+        writel(i, tx_ring->tail);
 
     return;
@@ -1834,44 +1863,7 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-    netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-    /* Memory barrier before checking head and tail */
-    smp_mb();
-
-    /* Check again in a case another CPU has just made room available. */
-    if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-        return -EBUSY;
-
-    /* A reprieve! - use start_queue because it doesn't call schedule */
-    netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-    ++tx_ring->tx_stats.restart_queue;
-    return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-    if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-        return 0;
-    return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1879,8 +1871,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 * there is not enough descriptors available in this ring since we need at least
 * one descriptor.
 **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
                                                struct i40e_ring *tx_ring)
 {
     unsigned int f;
     int count = 0;
@@ -1895,7 +1887,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
         count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
     count += TXD_USE_COUNT(skb_headlen(skb));
-    if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+    if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
         tx_ring->tx_stats.tx_busy++;
         return 0;
     }
@@ -1921,11 +1913,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     u32 td_cmd = 0;
     u8 hdr_len = 0;
     int tso;
-    if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+    if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
         return NETDEV_TX_BUSY;
 
     /* prepare the xmit flags */
-    if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+    if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
         goto out_drop;
 
     /* obtain protocol of skb */
@@ -1940,7 +1932,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     else if (protocol == htons(ETH_P_IPV6))
         tx_flags |= I40E_TX_FLAGS_IPV6;
 
-    tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+    tso = i40e_tso(tx_ring, skb, &hdr_len,
                    &cd_type_cmd_tso_mss, &cd_tunneling);
 
     if (tso < 0)
@@ -1961,17 +1953,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     if (skb->ip_summed == CHECKSUM_PARTIAL) {
         tx_flags |= I40E_TX_FLAGS_CSUM;
 
-        i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+        i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                             tx_ring, &cd_tunneling);
     }
 
     i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                        cd_tunneling, cd_l2tag2);
 
-    i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                td_cmd, td_offset);
-
-    i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+    i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                  td_cmd, td_offset);
 
     return NETDEV_TX_OK;
...
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC         (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO           (u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB         (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL  (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK     0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
 struct i40e_tx_buffer {
     struct i40e_tx_desc *next_to_watch;
-    unsigned long time_stamp;
     union {
         struct sk_buff *skb;
         void *raw_buf;
...
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
     /* flow director stats */
     u64 fd_atr_match;
    u64 fd_sb_match;
+    u64 fd_atr_tunnel_match;
     /* EEE LPI */
     u32 tx_lpi_status;
     u32 rx_lpi_status;
...
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
     struct ixgbe_hw *hw = &adapter->hw;
     struct ixgbe_fdir_filter *input;
     union ixgbe_atr_input mask;
+    u8 queue;
     int err;
 
     if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
         return -EOPNOTSUPP;
 
-    /*
-     * Don't allow programming if the action is a queue greater than
-     * the number of online Rx queues.
+    /* ring_cookie is a masked into a set of queues and ixgbe pools or
+     * we use the drop index.
      */
-    if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-        (fsp->ring_cookie >= adapter->num_rx_queues))
-        return -EINVAL;
+    if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+        queue = IXGBE_FDIR_DROP_QUEUE;
+    } else {
+        u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+        u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+        if (!vf && (ring >= adapter->num_rx_queues))
+            return -EINVAL;
+        else if (vf &&
+                 ((vf > adapter->num_vfs) ||
+                   ring >= adapter->num_rx_queues_per_pool))
+            return -EINVAL;
+
+        /* Map the ring onto the absolute queue index */
+        if (!vf)
+            queue = adapter->rx_ring[ring]->reg_idx;
+        else
+            queue = ((vf - 1) *
+                adapter->num_rx_queues_per_pool) + ring;
+    }
 
     /* Don't allow indexes to exist outside of available space */
     if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
     /* program filters to filter memory */
     err = ixgbe_fdir_write_perfect_filter_82599(hw,
-                &input->filter, input->sw_idx,
-                (input->action == IXGBE_FDIR_DROP_QUEUE) ?
-                IXGBE_FDIR_DROP_QUEUE :
-                adapter->rx_ring[input->action]->reg_idx);
+                &input->filter, input->sw_idx, queue);
     if (err)
         goto err_out_w_lock;
...
@@ -796,6 +796,31 @@ struct ethtool_rx_flow_spec {
     __u32       location;
 };
 
+/* How rings are layed out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues the ring cookie is partitioned
+ * into a 32bit queue index with an 8 bit virtual function id.
+ * This also leaves the 3bytes for further specifiers. It is possible
+ * future devices may support more than 256 virtual functions if
+ * devices start supporting PCIe w/ARI. However at the moment I
+ * do not know of any devices that support this so I do not reserve
+ * space for this at this time. If a future patch consumes the next
+ * byte it should be aware of this possiblity.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING   0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF    0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+    return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+};
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+    return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+                ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+};
+
 /**
  * struct ethtool_rxnfc - command to get or set RX flow classification rules
  * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
...