Commit f4f7981e authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2015-12-03

This series contains updates to i40e and i40evf only.

Mitch updates the i40evf driver to increase the maximum number of queues,
since future devices will allow for more queue pairs.  He also removes a
duplicate printing of the driver info string during init, since it is
already done in probe, and converts several allocations that do not need
to be atomic over to GFP_KERNEL.  He then makes i40e_sync_vsi_filters()
a more mature function, giving it a common exit point so that it properly
releases the busy lock on the VSI and propagates errors to its callers.
Finally, he does some whitespace housekeeping in i40evf.

Kiran moves the detection/recovery of transmit queue hangs from the
tx_timeout function to the service_task, and updates it.  He also fixes a
memory leak seen when users program flow-director filters using ethtool
(sideband filter programming): the check on 'tx_buffer->skb' was
preventing 'raw_buf' from being freed as part of the cleanup.

Jesse enables turning packet split on and off using ethtool priv flags.
He then does some housekeeping for both the i40e and i40evf drivers,
which includes removing unused/useless code, correcting whitespace,
removing a duplicate #include, and fixing an incorrect comment.

Neerav cleans up the functions that gather flow control Rx XOFF stats:
since the driver logic for checking transmit hangs has recently been
moved, these functions no longer do anything meaningful.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c5b6c3ee 4eeb1fff
@@ -269,17 +269,6 @@ config I40E
 	  To compile this driver as a module, choose M here. The module
 	  will be called i40e.
 
-config I40E_VXLAN
-	bool "Virtual eXtensible Local Area Network Support"
-	default n
-	depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
-	---help---
-	  This allows one to create VXLAN virtual interfaces that provide
-	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used
-	  to tunnel virtual network infrastructure in virtualized environments.
-	  Say Y here if you want to use Virtual eXtensible Local Area Network
-	  (VXLAN) in the driver.
-
 config I40E_DCB
 	bool "Data Center Bridging (DCB) Support"
 	default n
......
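The Kconfig option can go away because the driver now keys VXLAN support directly off CONFIG_VXLAN using IS_ENABLED(). A minimal sketch of the pattern, assuming nothing beyond <linux/kconfig.h> (which every kernel compilation unit gets implicitly):

	/* IS_ENABLED(CONFIG_FOO) is true for both CONFIG_FOO=y and
	 * CONFIG_FOO=m, whereas #ifdef CONFIG_FOO only sees the
	 * built-in (=y) case, which is why the old code needed its
	 * own I40E_VXLAN option to track the VXLAN tristate.
	 */
	#if IS_ENABLED(CONFIG_VXLAN)
	#include <net/vxlan.h>
	#endif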
@@ -42,7 +42,6 @@
 #include <linux/string.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#include <linux/tcp.h>
 #include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
@@ -104,6 +103,7 @@
 #define I40E_PRIV_FLAGS_LINKPOLL_FLAG	BIT(1)
 #define I40E_PRIV_FLAGS_FD_ATR		BIT(2)
 #define I40E_PRIV_FLAGS_VEB_STATS	BIT(3)
+#define I40E_PRIV_FLAGS_PS		BIT(4)
 
 #define I40E_NVM_VERSION_LO_SHIFT	0
 #define I40E_NVM_VERSION_LO_MASK	(0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -281,7 +281,7 @@ struct i40e_pf {
 	u32 fd_atr_cnt;
 	u32 fd_tcp_rule;
 
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 	__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
 	u16 pending_vxlan_bitmap;
 
@@ -322,9 +322,7 @@ struct i40e_pf {
 #define I40E_FLAG_FD_ATR_ENABLED	BIT_ULL(22)
 #define I40E_FLAG_PTP			BIT_ULL(25)
 #define I40E_FLAG_MFP_ENABLED		BIT_ULL(26)
-#ifdef CONFIG_I40E_VXLAN
 #define I40E_FLAG_VXLAN_FILTER_SYNC	BIT_ULL(27)
-#endif
 #define I40E_FLAG_PORT_ID_VALID		BIT_ULL(28)
 #define I40E_FLAG_DCB_CAPABLE		BIT_ULL(29)
 #define I40E_FLAG_RSS_AQ_CAPABLE	BIT_ULL(31)
@@ -579,6 +577,9 @@ struct i40e_q_vector {
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 
+#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */
+	unsigned long hung_detected; /* Set/Reset for hung_detection logic */
+
 	cpumask_t affinity_mask;
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
@@ -606,8 +607,8 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
 	full_ver = hw->nvm.oem_ver;
 	ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
-	build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
-		 & I40E_OEM_VER_BUILD_MASK);
+	build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
+		 I40E_OEM_VER_BUILD_MASK);
 	patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
 
 	snprintf(buf, sizeof(buf),
@@ -715,7 +716,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
 void i40e_veb_release(struct i40e_veb *veb);
 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
-i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
 void i40e_pf_reset_stats(struct i40e_pf *pf);
......
@@ -231,6 +231,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
 	"LinkPolling",
 	"flow-director-atr",
 	"veb-stats",
+	"packet-split",
 };
 
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
@@ -2709,6 +2710,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
 		I40E_PRIV_FLAGS_FD_ATR : 0;
 	ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
 		I40E_PRIV_FLAGS_VEB_STATS : 0;
+	ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
+		I40E_PRIV_FLAGS_PS : 0;
 
 	return ret_flags;
 }
@@ -2723,6 +2726,26 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
+	bool reset_required = false;
+
+	/* NOTE: MFP is not settable */
+
+	/* allow the user to control the method of receive
+	 * buffer DMA, whether the packet is split at header
+	 * boundaries into two separate buffers.  In some cases
+	 * one routine or the other will perform better.
+	 */
+	if ((flags & I40E_PRIV_FLAGS_PS) &&
+	    !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
+		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
+		pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
+		reset_required = true;
+	} else if (!(flags & I40E_PRIV_FLAGS_PS) &&
+		   (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
+		pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
+		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
+		reset_required = true;
+	}
 
 	if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
 		pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
@@ -2745,6 +2768,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	else
 		pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
 
+	/* if needed, issue reset to cause things to take effect */
+	if (reset_required)
+		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
 	return 0;
 }
......
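The new flag follows the usual ethtool priv-flags contract: bit N of the bitmap returned by .get_priv_flags must line up with entry N of the string table above, so "packet-split" at table index 4 pairs with I40E_PRIV_FLAGS_PS, which is BIT(4). A stripped-down sketch of the get side; the demo_* names are hypothetical stand-ins, not driver code:

	static u32 demo_get_priv_flags(struct net_device *dev)
	{
		struct demo_priv *priv = netdev_priv(dev);	/* hypothetical */
		u32 ret_flags = 0;

		if (priv->rx_packet_split)	/* hypothetical state bit */
			ret_flags |= BIT(4);	/* lines up with "packet-split" */
		return ret_flags;
	}

From userspace the toggle is then reachable as "ethtool --set-priv-flags <dev> packet-split on"; because packet split changes how receive buffers are posted to hardware, the set path above requests a PF reset so the rings are rebuilt with the new layout.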
@@ -27,7 +27,7 @@
 
 /* Local includes */
 #include "i40e.h"
 #include "i40e_diag.h"
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 #include <net/vxlan.h>
 #endif
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 7
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -791,75 +791,6 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
 }
 #endif
 
-/**
- * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
- * @pf: the corresponding PF
- *
- * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
- **/
-static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
-{
-	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
-	struct i40e_hw_port_stats *nsd = &pf->stats;
-	struct i40e_hw *hw = &pf->hw;
-	u64 xoff = 0;
-
-	if ((hw->fc.current_mode != I40E_FC_FULL) &&
-	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
-		return;
-
-	xoff = nsd->link_xoff_rx;
-	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
-			   pf->stat_offsets_loaded,
-			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
-
-	/* No new LFC xoff rx */
-	if (!(nsd->link_xoff_rx - xoff))
-		return;
-}
-
-/**
- * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
- * @pf: the corresponding PF
- *
- * Update the Rx XOFF counter (PAUSE frames) in PFC mode
- **/
-static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
-{
-	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
-	struct i40e_hw_port_stats *nsd = &pf->stats;
-	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
-	struct i40e_dcbx_config *dcb_cfg;
-	struct i40e_hw *hw = &pf->hw;
-	u16 i;
-	u8 tc;
-
-	dcb_cfg = &hw->local_dcbx_config;
-
-	/* Collect Link XOFF stats when PFC is disabled */
-	if (!dcb_cfg->pfc.pfcenable) {
-		i40e_update_link_xoff_rx(pf);
-		return;
-	}
-
-	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-		u64 prio_xoff = nsd->priority_xoff_rx[i];
-
-		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
-				   pf->stat_offsets_loaded,
-				   &osd->priority_xoff_rx[i],
-				   &nsd->priority_xoff_rx[i]);
-
-		/* No new PFC xoff rx */
-		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
-			continue;
-		/* Get the TC for given priority */
-		tc = dcb_cfg->etscfg.prioritytable[i];
-		xoff[tc] = true;
-	}
-}
-
 /**
  * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
@@ -1054,12 +985,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
 			   pf->stat_offsets_loaded,
 			   &osd->link_xon_tx, &nsd->link_xon_tx);
-	i40e_update_prio_xoff_rx(pf);	/* handles I40E_GLPRT_LXOFFRXC */
+	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
 			   pf->stat_offsets_loaded,
 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
 
 	for (i = 0; i < 8; i++) {
+		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+				   pf->stat_offsets_loaded,
+				   &osd->priority_xoff_rx[i],
+				   &nsd->priority_xoff_rx[i]);
 		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
 				   pf->stat_offsets_loaded,
 				   &osd->priority_xon_rx[i],
@@ -1553,11 +1490,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	}
 
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
-	/* schedule our worker thread which will take care of
-	 * applying the new filter changes
-	 */
-	i40e_service_event_schedule(vsi->back);
-	return 0;
+	return i40e_sync_vsi_filters(vsi);
 }
 
 /**
@@ -1872,8 +1806,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 	bool add_happened = false;
 	int filter_list_len = 0;
 	u32 changed_flags = 0;
+	i40e_status aq_ret = 0;
 	bool err_cond = false;
-	i40e_status ret = 0;
+	int retval = 0;
 	struct i40e_pf *pf;
 	int num_add = 0;
 	int num_del = 0;
@@ -1936,8 +1871,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		}
 		spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-		if (err_cond)
+		if (err_cond) {
 			i40e_cleanup_add_list(&tmp_add_list);
+			retval = -ENOMEM;
+			goto out;
+		}
 	}
 
 	/* Now process 'del_list' outside the lock */
@@ -1955,7 +1893,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
 			i40e_undo_add_filter_entries(vsi);
 			spin_unlock_bh(&vsi->mac_filter_list_lock);
-			return -ENOMEM;
+			retval = -ENOMEM;
+			goto out;
 		}
 
 		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
@@ -1973,18 +1912,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
 			/* flush a full buffer */
 			if (num_del == filter_list_len) {
-				ret = i40e_aq_remove_macvlan(&pf->hw,
-					    vsi->seid, del_list, num_del,
-					    NULL);
+				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
+								vsi->seid,
+								del_list,
+								num_del,
+								NULL);
 				aq_err = pf->hw.aq.asq_last_status;
 				num_del = 0;
 				memset(del_list, 0, sizeof(*del_list));
 
-				if (ret && aq_err != I40E_AQ_RC_ENOENT)
+				if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+					retval = -EIO;
 					dev_err(&pf->pdev->dev,
 						"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
-						i40e_stat_str(&pf->hw, ret),
+						i40e_stat_str(&pf->hw, aq_ret),
 						i40e_aq_str(&pf->hw, aq_err));
+				}
 			}
 			/* Release memory for MAC filter entries which were
 			 * synced up with HW.
@@ -1994,15 +1937,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		}
 
 		if (num_del) {
-			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
-						     del_list, num_del, NULL);
+			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+							del_list, num_del,
+							NULL);
 			aq_err = pf->hw.aq.asq_last_status;
 			num_del = 0;
 
-			if (ret && aq_err != I40E_AQ_RC_ENOENT)
+			if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
 				dev_info(&pf->pdev->dev,
 					 "ignoring delete macvlan error, err %s aq_err %s\n",
-					 i40e_stat_str(&pf->hw, ret),
+					 i40e_stat_str(&pf->hw, aq_ret),
 					 i40e_aq_str(&pf->hw, aq_err));
 		}
 
@@ -2026,7 +1970,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			spin_lock_bh(&vsi->mac_filter_list_lock);
 			i40e_undo_add_filter_entries(vsi);
 			spin_unlock_bh(&vsi->mac_filter_list_lock);
-			return -ENOMEM;
+			retval = -ENOMEM;
+			goto out;
 		}
 
 		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
@@ -2047,13 +1992,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
 			/* flush a full buffer */
 			if (num_add == filter_list_len) {
-				ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-							  add_list, num_add,
-							  NULL);
+				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+							     add_list, num_add,
+							     NULL);
 				aq_err = pf->hw.aq.asq_last_status;
 				num_add = 0;
 
-				if (ret)
+				if (aq_ret)
 					break;
 				memset(add_list, 0, sizeof(*add_list));
 			}
@@ -2065,18 +2010,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		}
 
 		if (num_add) {
-			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-						  add_list, num_add, NULL);
+			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+						     add_list, num_add, NULL);
 			aq_err = pf->hw.aq.asq_last_status;
 			num_add = 0;
 		}
 		kfree(add_list);
 		add_list = NULL;
 
-		if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
+		if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
+			retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
 			dev_info(&pf->pdev->dev,
 				 "add filter failed, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, ret),
+				 i40e_stat_str(&pf->hw, aq_ret),
 				 i40e_aq_str(&pf->hw, aq_err));
 			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
 			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
@@ -2094,16 +2040,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		bool cur_multipromisc;
 
 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-							    vsi->seid,
-							    cur_multipromisc,
-							    NULL);
-		if (ret)
+		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+							       vsi->seid,
+							       cur_multipromisc,
+							       NULL);
+		if (aq_ret) {
+			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
 			dev_info(&pf->pdev->dev,
 				 "set multi promisc failed, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, ret),
+				 i40e_stat_str(&pf->hw, aq_ret),
 				 i40e_aq_str(&pf->hw,
 					     pf->hw.aq.asq_last_status));
+		}
 	}
 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
 		bool cur_promisc;
@@ -2122,36 +2071,47 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 			}
 		} else {
-			ret = i40e_aq_set_vsi_unicast_promiscuous(
+			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
 							  &vsi->back->hw,
 							  vsi->seid,
 							  cur_promisc, NULL);
-			if (ret)
+			if (aq_ret) {
+				retval =
+				i40e_aq_rc_to_posix(aq_ret,
+						    pf->hw.aq.asq_last_status);
 				dev_info(&pf->pdev->dev,
 					 "set unicast promisc failed, err %d, aq_err %d\n",
-					 ret, pf->hw.aq.asq_last_status);
-			ret = i40e_aq_set_vsi_multicast_promiscuous(
+					 aq_ret, pf->hw.aq.asq_last_status);
+			}
+			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
 							  &vsi->back->hw,
 							  vsi->seid,
 							  cur_promisc, NULL);
-			if (ret)
+			if (aq_ret) {
+				retval =
+				i40e_aq_rc_to_posix(aq_ret,
						    pf->hw.aq.asq_last_status);
 				dev_info(&pf->pdev->dev,
 					 "set multicast promisc failed, err %d, aq_err %d\n",
-					 ret, pf->hw.aq.asq_last_status);
+					 aq_ret, pf->hw.aq.asq_last_status);
+			}
 		}
-		ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-						vsi->seid,
-						cur_promisc, NULL);
-		if (ret)
+		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+						   vsi->seid,
+						   cur_promisc, NULL);
+		if (aq_ret) {
+			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
 			dev_info(&pf->pdev->dev,
 				 "set brdcast promisc failed, err %s, aq_err %s\n",
-				 i40e_stat_str(&pf->hw, ret),
+				 i40e_stat_str(&pf->hw, aq_ret),
 				 i40e_aq_str(&pf->hw,
 					     pf->hw.aq.asq_last_status));
+		}
 	}
+out:
 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
-	return 0;
+	return retval;
 }
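Structurally, the rework is the kernel's usual single-exit idiom: claim the busy bit once, funnel every failure to one label that releases it, and return a real errno instead of swallowing it. A self-contained sketch of that shape, with demo_* names as hypothetical stand-ins (the bit helpers are the kernel's own):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	#define DEMO_CONFIG_BUSY 0

	struct demo_vsi {
		unsigned long state;
	};

	/* hypothetical helpers standing in for the list/admin-queue work */
	static bool demo_build_filter_list(struct demo_vsi *vsi) { return true; }
	static int demo_push_filters_to_hw(struct demo_vsi *vsi) { return 0; }

	static int demo_sync_filters(struct demo_vsi *vsi)
	{
		int retval = 0;

		if (test_and_set_bit(DEMO_CONFIG_BUSY, &vsi->state))
			return -EBUSY;	/* another sync is in flight */

		if (!demo_build_filter_list(vsi)) {
			retval = -ENOMEM;
			goto out;	/* error still drops the busy bit */
		}
		retval = demo_push_filters_to_hw(vsi);
	out:
		clear_bit(DEMO_CONFIG_BUSY, &vsi->state);
		return retval;
	}

Before this change, the early "return -ENOMEM" paths left __I40E_CONFIG_BUSY set, and callers such as i40e_set_mac() had no error to propagate.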
 
 /**
@@ -4368,17 +4328,41 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 	else
 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
+	/* Bail out if interrupts are disabled because napi_poll
+	 * execution in-progress or will get scheduled soon.
+	 * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
+	 */
+	if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
+		return;
+
 	head = i40e_get_head(tx_ring);
 
 	tx_pending = i40e_get_tx_pending(tx_ring);
 
-	/* Interrupts are disabled and TX pending is non-zero,
-	 * trigger the SW interrupt (don't wait). Worst case
-	 * there will be one extra interrupt which may result
-	 * into not cleaning any queues because queues are cleaned.
+	/* HW is done executing descriptors, updated HEAD write back,
+	 * but SW hasn't processed those descriptors. If interrupt is
+	 * not generated from this point ON, it could result into
+	 * dev_watchdog detecting timeout on those netdev_queue,
+	 * hence proactively trigger SW interrupt.
 	 */
-	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
-		i40e_force_wb(vsi, tx_ring->q_vector);
+	if (tx_pending) {
+		/* NAPI Poll didn't run and clear since it was set */
+		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
+				       &tx_ring->q_vector->hung_detected)) {
+			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
+				    vsi->seid, q_idx, tx_pending,
+				    tx_ring->next_to_clean, head,
+				    tx_ring->next_to_use,
+				    readl(tx_ring->tail));
+			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
+				    vsi->seid, q_idx, val);
+			i40e_force_wb(vsi, tx_ring->q_vector);
+		} else {
+			/* First Chance - detected possible hung */
+			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
+				&tx_ring->q_vector->hung_detected);
+		}
+	}
 }
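Reduced to its essentials, the moved detection is a two-strike scheme: the service task marks a vector as suspect on one pass, i40e_napi_poll() clears the mark whenever it actually runs (see the hunk in i40e_txrx.c below), and only a mark that survives until the next service-task pass triggers the forced descriptor write-back:

	if (tx_pending) {
		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
				       &tx_ring->q_vector->hung_detected))
			/* second strike: NAPI never ran in between */
			i40e_force_wb(vsi, tx_ring->q_vector);
		else
			/* first strike: flag it and give NAPI a chance */
			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
				&tx_ring->q_vector->hung_detected);
	}

Worst case, one extra software interrupt fires for a queue that was about to be cleaned anyway.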
 
 /**
@@ -5310,7 +5294,7 @@ int i40e_open(struct net_device *netdev)
 		       TCP_FLAG_CWR) >> 16);
 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
 
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 	vxlan_get_rx_port(netdev);
 #endif
@@ -7006,7 +6990,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 	i40e_flush(hw);
 }
 
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 /**
  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
  * @pf: board private structure
@@ -7073,7 +7057,7 @@ static void i40e_service_task(struct work_struct *work)
 	i40e_watchdog_subtask(pf);
 	i40e_fdir_reinit_subtask(pf);
 	i40e_sync_filters_subtask(pf);
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 	i40e_sync_vxlan_filters_subtask(pf);
 #endif
 	i40e_clean_adminq_subtask(pf);
@@ -8449,7 +8433,7 @@ static int i40e_set_features(struct net_device *netdev,
 	return 0;
 }
 
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 /**
  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
  * @pf: board private structure
@@ -8769,7 +8753,7 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
 	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
 	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
 #endif
......
@@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
 			 "Filter deleted for PCTYPE %d loc = %d\n",
 			 fd_data->pctype, fd_data->fd_id);
 	}
+	if (err)
+		kfree(raw_packet);
+
 	return err ? -EOPNOTSUPP : 0;
 }
 
@@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 			 fd_data->pctype, fd_data->fd_id);
 	}
 
+	if (err)
+		kfree(raw_packet);
+
 	return err ? -EOPNOTSUPP : 0;
 }
 
@@ -322,7 +328,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
  * @fd_data: the flow director data required for the FDir descriptor
  * @add: true adds a filter, false removes it
  *
- * Always returns -EOPNOTSUPP
+ * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
 				    struct i40e_fdir_filter *fd_data,
@@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 		}
 	}
 
+	if (err)
+		kfree(raw_packet);
+
 	return err ? -EOPNOTSUPP : 0;
 }
 
@@ -506,9 +515,6 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 				pf->auto_disable_flags |=
 						       I40E_FLAG_FD_SB_ENABLED;
 			}
-		} else {
-			dev_info(&pdev->dev,
-				 "FD filter programming failed due to incorrect filter parameters\n");
 		}
 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -526,11 +532,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 					    struct i40e_tx_buffer *tx_buffer)
 {
 	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-
+		dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
 					 dma_unmap_addr(tx_buffer, dma),
@@ -542,6 +544,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
+
+	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+		kfree(tx_buffer->raw_buf);
+
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
 	dma_unmap_len_set(tx_buffer, len, 0);
@@ -1863,7 +1869,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		q_vector->itr_countdown--;
 	else
 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
-
 }
 
 /**
@@ -1891,12 +1896,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 		return 0;
 	}
 
+	/* Clear hung_detected bit */
+	clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
 	/* Since the actual Tx work is minimal, we can give the Tx a larger
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
-		arm_wb |= ring->arm_wb;
+		arm_wb = arm_wb || ring->arm_wb;
 		ring->arm_wb = false;
 	}
......
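The raw_packet additions plug an ownership leak: the buffer only passes to the hardware path (and is later released by i40e_unmap_and_free_tx_resource() above) when programming succeeds, so on failure the caller still owns it and must free it. A sketch of the convention these hunks enforce, assuming the same calling shape as the udpv4/tcpv4 helpers:

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	/* ... build the template packet in raw_packet ... */
	err = i40e_program_fdir_filter(fd_data, raw_packet, vsi, add);
	if (err)
		kfree(raw_packet);	/* HW never took ownership */

	return err ? -EOPNOTSUPP : 0;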
@@ -290,8 +290,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 		next_q = find_first_bit(&linklistmap,
 					(I40E_MAX_VSI_QP *
 					 I40E_VIRTCHNL_SUPPORTED_QTYPES));
-		vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
-		qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
+		vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+		qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 
 		reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
@@ -1633,9 +1633,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* program the updated filter list */
-	if (i40e_sync_vsi_filters(vsi))
-		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters\n",
-			vf->vf_id);
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+			vf->vf_id, ret);
 
 error_param:
 	/* send the response to the VF */
@@ -1687,9 +1688,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* program the updated filter list */
-	if (i40e_sync_vsi_filters(vsi))
-		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters\n",
-			vf->vf_id);
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+			vf->vf_id, ret);
 
 error_param:
 	/* send the response to the VF */
......
@@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 					    struct i40e_tx_buffer *tx_buffer)
 {
 	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-
+		dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
 					 dma_unmap_addr(tx_buffer, dma),
@@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
+
+	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+		kfree(tx_buffer->raw_buf);
+
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
 	dma_unmap_len_set(tx_buffer, len, 0);
@@ -127,17 +127,24 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 }
 
 /**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
+ * i40evf_get_tx_pending - how many Tx descriptors not processed
+ * @tx_ring: the ring of descriptors
  *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
  **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+u32 i40evf_get_tx_pending(struct i40e_ring *ring)
 {
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+	u32 head, tail;
 
-	return le32_to_cpu(*(volatile __le32 *)head);
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 #define WB_STRIDE 0x3
@@ -404,7 +411,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	return false;
 }
 
-/*
+/**
  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
  * @tx_ring: the tx ring to set up
  *
@@ -1252,10 +1259,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
 		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
 	}
+
 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
 		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
 		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
 	}
+
 	if (rx || tx) {
 		/* get the higher of the two ITR adjustments and
 		 * use the same value for both ITR registers
@@ -1291,7 +1300,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		q_vector->itr_countdown--;
 	else
 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
-
 }
 
 /**
@@ -1324,7 +1332,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
-		arm_wb |= ring->arm_wb;
+		arm_wb = arm_wb || ring->arm_wb;
 		ring->arm_wb = false;
 	}
 
@@ -1545,7 +1553,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		*tx_flags |= I40E_TX_FLAGS_IPV6;
 	}
-
 	if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
 	    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
 	    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
@@ -1644,7 +1651,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
 /**
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb: send buffer
  * @tx_flags: collected send information
@@ -1840,7 +1847,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_bi = &tx_ring->tx_bi[i];
 	}
 
-#define WB_STRIDE 0x3
 	/* set next_to_watch value indicating a packet is present */
 	first->next_to_watch = tx_desc;
 
@@ -1867,12 +1873,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	 * update tail and set RS bit on every packet.
 	 * if xmit_more is false and last_xmit_more was true
 	 * update tail and set RS bit.
-	 * else (kernel < 3.18)
-	 *	if every packet spanned less than 4 desc
-	 *	then set RS bit on 4th packet and update tail
-	 *	on every packet
-	 *	else
-	 *	set RS bit on EOP for every packet and update tail
 	 *
 	 * Optimization: wmb to be issued only in case of tail update.
 	 * Also optimize the Descriptor WB path for RS bit with the same
......
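i40evf_get_tx_pending() is plain modular ring arithmetic: head is the last descriptor position the hardware has completed (read from the write-back area, since the VF has no head register), tail is the last position software filled, and both wrap modulo the ring size. The same computation as a free-standing helper:

	/* in-flight descriptors on a ring of 'count' entries; 0 = idle */
	static u32 demo_ring_pending(u32 head, u32 tail, u32 count)
	{
		if (head == tail)
			return 0;
		return (head < tail) ? tail - head
				     : tail + count - head;
	}

For example, head = 500 and tail = 10 on a 512-entry ring gives 10 + 512 - 500 = 22 descriptors still pending.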
@@ -324,4 +324,19 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
 void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
 int i40evf_napi_poll(struct napi_struct *napi, int budget);
+u32 i40evf_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: Tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
 #endif /* _I40E_TXRX_H_ */
@@ -97,8 +97,7 @@ struct i40e_vsi {
 #define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i) \
 	(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define MAX_RX_QUEUES 8
-#define MAX_TX_QUEUES MAX_RX_QUEUES
+#define MAX_QUEUES 16
 
 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
 #define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
......
@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.4.1"
+#define DRV_VERSION "1.4.3"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
 	"Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -259,7 +259,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
 {
 	struct i40e_hw *hw = &adapter->hw;
 	int i;
-	uint32_t dyn_ctl;
+	u32 dyn_ctl;
 
 	if (mask & 1) {
 		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
@@ -1205,7 +1205,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
 	err = i40evf_acquire_msix_vectors(adapter, v_budget);
 
 out:
-	adapter->netdev->real_num_tx_queues = pairs;
+	netif_set_real_num_rx_queues(adapter->netdev, pairs);
+	netif_set_real_num_tx_queues(adapter->netdev, pairs);
 	return err;
 }
 
@@ -2511,7 +2512,6 @@ static void i40evf_init_task(struct work_struct *work)
 	if (netdev->features & NETIF_F_GRO)
 		dev_info(&pdev->dev, "GRO is enabled\n");
 
-	dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
 	adapter->state = __I40EVF_DOWN;
 	set_bit(__I40E_DOWN, &adapter->vsi.state);
 	i40evf_misc_irq_enable(adapter);
@@ -2615,8 +2615,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
-	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
-				   MAX_TX_QUEUES);
+	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
......
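The probe and interrupt-capability hunks together form an allocate-max-then-trim pattern: the netdev is created with the structural maximum of MAX_QUEUES (16) queue pairs, and the effective counts are set only after MSI-X negotiation reveals how many pairs the VF really has. A sketch of the flow, with the adapter struct name as a hypothetical stand-in:

	netdev = alloc_etherdev_mq(sizeof(struct demo_adapter), MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	/* ... later, once MSI-X vectors are acquired ... */
	netif_set_real_num_rx_queues(netdev, pairs);
	netif_set_real_num_tx_queues(netdev, pairs);

Using the netif_set_real_num_*_queues() helpers rather than writing real_num_tx_queues directly also lets the core validate the counts and keep its queue state consistent.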
@@ -242,7 +242,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
 	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
 		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
-	vqci = kzalloc(len, GFP_ATOMIC);
+	vqci = kzalloc(len, GFP_KERNEL);
 	if (!vqci)
 		return;
 
@@ -353,7 +353,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
 	len = sizeof(struct i40e_virtchnl_irq_map_info) +
 	      (adapter->num_msix_vectors *
 		sizeof(struct i40e_virtchnl_vector_map));
-	vimi = kzalloc(len, GFP_ATOMIC);
+	vimi = kzalloc(len, GFP_KERNEL);
 	if (!vimi)
 		return;
 
@@ -421,7 +421,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 		more = true;
 	}
 
-	veal = kzalloc(len, GFP_ATOMIC);
+	veal = kzalloc(len, GFP_KERNEL);
 	if (!veal)
 		return;
 
@@ -483,7 +483,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 		      (count * sizeof(struct i40e_virtchnl_ether_addr));
 		more = true;
 	}
 
-	veal = kzalloc(len, GFP_ATOMIC);
+	veal = kzalloc(len, GFP_KERNEL);
 	if (!veal)
 		return;
 
@@ -547,7 +547,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		      (count * sizeof(u16));
 		more = true;
 	}
 
-	vvfl = kzalloc(len, GFP_ATOMIC);
+	vvfl = kzalloc(len, GFP_KERNEL);
 	if (!vvfl)
 		return;
 
@@ -609,7 +609,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		      (count * sizeof(u16));
 		more = true;
 	}
 
-	vvfl = kzalloc(len, GFP_ATOMIC);
+	vvfl = kzalloc(len, GFP_KERNEL);
 	if (!vvfl)
 		return;
......
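All of these messages are built from the adapter's task (process) context, where sleeping is allowed, so GFP_KERNEL is the right strength: it may block and let the allocator reclaim memory. GFP_ATOMIC is reserved for callers that cannot sleep (hard or soft interrupt context, or under a spinlock) and draws from scarcer reserves, so using it here was needlessly fragile. The rule of thumb in miniature:

	/* process context, sleeping allowed: preferred */
	buf = kzalloc(len, GFP_KERNEL);

	/* interrupt context or inside a spinlock: no sleeping */
	buf = kzalloc(len, GFP_ATOMIC);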