Commit a076d1bd authored by David S. Miller's avatar David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-02-03

This series contains updates to i40e/i40evf only.

Jake fixes up the driver to not call i40e_vsi_kill_vlan() or
i40e_vsi_add_vlan() when the PVID is set or when the VID is less than 1.
Cleaned up an unnecessary check, since there is no reason we cannot
simply call i40e_del_mac_all_vlan() directly.  Renamed several
functions so that their names more clearly reflect their actual
purpose and behavior.

Bimmy cleans up unused/deprecated macros.

Mitch cleans up unused device ids which were intended for use when
running Linux VF drivers under Hyper-V, but found to be not needed.
Then cleaned up a function that is no longer needed since the client
open and close functions were refactored.  Adds a sleep without timeout
until the reply from the PF driver has been received since the iWARP
client cannot continue until the operation has been completed.

Tushar Dave fixes an issue seen on SPARC where the use of the 'packed'
directive was causing kernel unaligned errors.

Alex does a refactor to pull some data off of the stack and store it
in the transmit buffer info section of the transmit ring.

Alan fixes a bug which was caused by passing a bad register value to the
firmware, by refactoring the macro INTRL_USEC_TO_REG into a static
inline function.  Also added feedback to the user as to the actual
interrupt rate limit being used when it differs from the requested limit.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 6e7bc478 33084060
...@@ -134,19 +134,6 @@ ...@@ -134,19 +134,6 @@
/* default to trying for four seconds */ /* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ) #define I40E_TRY_LINK_TIMEOUT (4 * HZ)
/**
 * i40e_is_mac_710 - Return true if MAC is X710/XL710
 * @hw: ptr to the hardware info
 *
 * Checks the detected MAC type against the two 710-family parts.
 **/
static inline bool i40e_is_mac_710(struct i40e_hw *hw)
{
	return (hw->mac.type == I40E_MAC_X710) ||
	       (hw->mac.type == I40E_MAC_XL710);
}
/* driver state flags */ /* driver state flags */
enum i40e_state_t { enum i40e_state_t {
__I40E_TESTING, __I40E_TESTING,
...@@ -762,6 +749,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); ...@@ -762,6 +749,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev); void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan); const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi); int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
...@@ -804,7 +792,6 @@ int i40e_lan_add_device(struct i40e_pf *pf); ...@@ -804,7 +792,6 @@ int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf); int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf); void i40e_client_subtask(struct i40e_pf *pf);
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
...@@ -852,12 +839,12 @@ int i40e_close(struct net_device *netdev); ...@@ -852,12 +839,12 @@ int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi); int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid);
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr); const u8 *macaddr);
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr); int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE #ifdef I40E_FCOE
......
...@@ -200,41 +200,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) ...@@ -200,41 +200,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
mutex_unlock(&i40e_client_instance_mutex); mutex_unlock(&i40e_client_instance_mutex);
} }
/**
 * i40e_notify_client_of_netdev_open - call the client open callback
 * @vsi: the VSI with netdev opened
 *
 * If a client instance is bound to this VSI's netdev, invoke its open
 * callback once; the OPENED state bit is set only when open succeeds,
 * so already-opened instances are left untouched.
 **/
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
{
	struct i40e_client_instance *cdev;
	int err;

	if (!vsi)
		return;

	mutex_lock(&i40e_client_instance_mutex);
	list_for_each_entry(cdev, &i40e_client_instances, list) {
		/* only act on the instance tied to this netdev */
		if (cdev->lan_info.netdev != vsi->netdev)
			continue;
		if (!cdev->client || !cdev->client->ops ||
		    !cdev->client->ops->open) {
			dev_dbg(&vsi->back->pdev->dev,
				"Cannot locate client instance open routine\n");
			continue;
		}
		/* skip instances that are already open */
		if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
			continue;
		err = cdev->client->ops->open(&cdev->lan_info, cdev->client);
		if (!err)
			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
	}
	mutex_unlock(&i40e_client_instance_mutex);
}
/** /**
* i40e_client_release_qvlist * i40e_client_release_qvlist
* @ldev: pointer to L2 context. * @ldev: pointer to L2 context.
......
...@@ -2072,7 +2072,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, ...@@ -2072,7 +2072,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector; struct i40e_q_vector *q_vector;
u16 vector, intrl; u16 vector, intrl;
intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
...@@ -2116,6 +2116,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, ...@@ -2116,6 +2116,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
u16 intrl_reg;
int i; int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
...@@ -2127,8 +2128,9 @@ static int __i40e_set_coalesce(struct net_device *netdev, ...@@ -2127,8 +2128,9 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL; return -EINVAL;
} }
if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
INTRL_REG_TO_USEC(I40E_MAX_INTRL));
return -EINVAL; return -EINVAL;
} }
...@@ -2141,7 +2143,12 @@ static int __i40e_set_coalesce(struct net_device *netdev, ...@@ -2141,7 +2143,12 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL; return -EINVAL;
} }
vsi->int_rate_limit = ec->rx_coalesce_usecs_high; intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
vsi->int_rate_limit);
}
if (ec->tx_coalesce_usecs == 0) { if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce) if (ec->use_adaptive_tx_coalesce)
......
...@@ -1434,7 +1434,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ...@@ -1434,7 +1434,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry(). * instead of list_for_each_entry().
**/ **/
static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{ {
if (!f) if (!f)
return; return;
...@@ -1477,17 +1477,18 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) ...@@ -1477,17 +1477,18 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
} }
/** /**
* i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans * i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched * @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered * @macaddr: the mac address to be filtered
* *
* Goes through all the macvlan filters and adds a macvlan filter for each * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
* go through all the macvlan filters and add a macvlan filter for each
* unique vlan that already exists. If a PVID has been assigned, instead only * unique vlan that already exists. If a PVID has been assigned, instead only
* add the macaddr to that VLAN. * add the macaddr to that VLAN.
* *
* Returns last filter added on success, else NULL * Returns last filter added on success, else NULL
**/ **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr) const u8 *macaddr)
{ {
struct i40e_mac_filter *f, *add = NULL; struct i40e_mac_filter *f, *add = NULL;
...@@ -1498,6 +1499,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, ...@@ -1498,6 +1499,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
return i40e_add_filter(vsi, macaddr, return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid)); le16_to_cpu(vsi->info.pvid));
if (!i40e_is_vsi_in_vlan(vsi))
return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE) if (f->state == I40E_FILTER_REMOVE)
continue; continue;
...@@ -1510,15 +1514,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, ...@@ -1510,15 +1514,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
} }
/** /**
* i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS * i40e_del_mac_filter - Remove a MAC filter from all VLANs
* @vsi: the VSI to be searched * @vsi: the VSI to be searched
* @macaddr: the mac address to be removed * @macaddr: the mac address to be removed
* *
* Removes a given MAC address from a VSI, regardless of VLAN * Removes a given MAC address from a VSI regardless of what VLAN it has been
* associated with.
* *
* Returns 0 for success, or error * Returns 0 for success, or error
**/ **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr) int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{ {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
struct hlist_node *h; struct hlist_node *h;
...@@ -1579,8 +1584,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ...@@ -1579,8 +1584,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_all_vlan(vsi, netdev->dev_addr); i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_put_mac_in_vlan(vsi, addr->sa_data); i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data); ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) { if (vsi->type == I40E_VSI_MAIN) {
...@@ -1756,14 +1761,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) ...@@ -1756,14 +1761,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_mac_filter *f;
if (i40e_is_vsi_in_vlan(vsi)) if (i40e_add_mac_filter(vsi, addr))
f = i40e_put_mac_in_vlan(vsi, addr);
else
f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
if (f)
return 0; return 0;
else else
return -ENOMEM; return -ENOMEM;
...@@ -1782,10 +1781,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) ...@@ -1782,10 +1781,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
if (i40e_is_vsi_in_vlan(vsi)) i40e_del_mac_filter(vsi, addr);
i40e_del_mac_all_vlan(vsi, addr);
else
i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
return 0; return 0;
} }
...@@ -2568,12 +2564,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) ...@@ -2568,12 +2564,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/** /**
* i40e_vsi_add_vlan - Add VSI membership for given VLAN * i40e_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @vid: VLAN id to be added (0 = untagged only , -1 = any) * @vid: VLAN id to be added
**/ **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{ {
int err; int err;
if (!vid || vsi->info.pvid)
return -EINVAL;
/* Locked once because all functions invoked below iterates list*/ /* Locked once because all functions invoked below iterates list*/
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
err = i40e_add_vlan_all_mac(vsi, vid); err = i40e_add_vlan_all_mac(vsi, vid);
...@@ -2616,10 +2615,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) ...@@ -2616,10 +2615,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/** /**
* i40e_vsi_kill_vlan - Remove VSI membership for given VLAN * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @vid: VLAN id to be removed (0 = untagged only , -1 = any) * @vid: VLAN id to be removed
**/ **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{ {
if (!vid || vsi->info.pvid)
return;
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_rm_vlan_all_mac(vsi, vid); i40e_rm_vlan_all_mac(vsi, vid);
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
...@@ -3266,7 +3268,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) ...@@ -3266,7 +3268,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr); q_vector->tx.itr);
wr32(hw, I40E_PFINT_RATEN(vector - 1), wr32(hw, I40E_PFINT_RATEN(vector - 1),
INTRL_USEC_TO_REG(vsi->int_rate_limit)); i40e_intrl_usec_to_reg(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */ /* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
...@@ -8682,7 +8684,7 @@ static int i40e_sw_init(struct i40e_pf *pf) ...@@ -8682,7 +8684,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.func_caps.fd_filters_best_effort; pf->hw.func_caps.fd_filters_best_effort;
} }
if (i40e_is_mac_710(&pf->hw) && if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4))) { (pf->hw.aq.fw_maj_ver < 4))) {
pf->flags |= I40E_FLAG_RESTART_AUTONEG; pf->flags |= I40E_FLAG_RESTART_AUTONEG;
...@@ -8691,13 +8693,13 @@ static int i40e_sw_init(struct i40e_pf *pf) ...@@ -8691,13 +8693,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
} }
/* Disable FW LLDP if FW < v4.3 */ /* Disable FW LLDP if FW < v4.3 */
if (i40e_is_mac_710(&pf->hw) && if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4))) (pf->hw.aq.fw_maj_ver < 4)))
pf->flags |= I40E_FLAG_STOP_FW_LLDP; pf->flags |= I40E_FLAG_STOP_FW_LLDP;
/* Use the FW Set LLDP MIB API if FW > v4.40 */ /* Use the FW Set LLDP MIB API if FW > v4.40 */
if (i40e_is_mac_710(&pf->hw) && if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
(pf->hw.aq.fw_maj_ver >= 5))) (pf->hw.aq.fw_maj_ver >= 5)))
pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
...@@ -9339,7 +9341,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ...@@ -9339,7 +9341,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/ */
i40e_rm_default_mac_filter(vsi, mac_addr); i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else { } else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */ /* relate the VSI_VMDQ name to the VSI_MAIN name */
...@@ -9348,7 +9350,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ...@@ -9348,7 +9350,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr); random_ether_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
} }
...@@ -9367,7 +9369,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ...@@ -9367,7 +9369,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/ */
eth_broadcast_addr(broadcast); eth_broadcast_addr(broadcast);
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY); i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->dev_addr, mac_addr);
......
...@@ -55,7 +55,7 @@ struct i40e_dma_mem { ...@@ -55,7 +55,7 @@ struct i40e_dma_mem {
void *va; void *va;
dma_addr_t pa; dma_addr_t pa;
u32 size; u32 size;
} __packed; };
#define i40e_allocate_dma_mem(h, m, unused, s, a) \ #define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40e_allocate_dma_mem_d(h, m, s, a) i40e_allocate_dma_mem_d(h, m, s, a)
...@@ -64,7 +64,7 @@ struct i40e_dma_mem { ...@@ -64,7 +64,7 @@ struct i40e_dma_mem {
struct i40e_virt_mem { struct i40e_virt_mem {
void *va; void *va;
u32 size; u32 size;
} __packed; };
#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
......
...@@ -2251,14 +2251,16 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, ...@@ -2251,14 +2251,16 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
/** /**
* i40e_tso - set up the tso context descriptor * i40e_tso - set up the tso context descriptor
* @skb: ptr to the skb we're sending * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header * @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1 * @cd_type_cmd_tso_mss: Quad Word 1
* *
* Returns 0 if no TSO can happen, 1 if tso is going, or error * Returns 0 if no TSO can happen, 1 if tso is going, or error
**/ **/
static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
u64 *cd_type_cmd_tso_mss)
{ {
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss; u64 cd_cmd, cd_tso_len, cd_mss;
union { union {
struct iphdr *v4; struct iphdr *v4;
...@@ -2271,6 +2273,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) ...@@ -2271,6 +2273,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr; unsigned char *hdr;
} l4; } l4;
u32 paylen, l4_offset; u32 paylen, l4_offset;
u16 gso_segs, gso_size;
int err; int err;
if (skb->ip_summed != CHECKSUM_PARTIAL) if (skb->ip_summed != CHECKSUM_PARTIAL)
...@@ -2335,10 +2338,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) ...@@ -2335,10 +2338,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* compute length of segmentation header */ /* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset; *hdr_len = (l4.tcp->doff * 4) + l4_offset;
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
first->gso_segs = gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */ /* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO; cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len; cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size; cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
...@@ -2699,7 +2710,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2699,7 +2710,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
u16 gso_segs;
u16 desc_count = 1; u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
...@@ -2708,15 +2718,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2708,15 +2718,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT; I40E_TX_FLAGS_VLAN_SHIFT;
} }
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
gso_segs = skb_shinfo(skb)->gso_segs;
else
gso_segs = 1;
/* multiply data chunks by size of headers */
first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
first->gso_segs = gso_segs;
first->skb = skb;
first->tx_flags = tx_flags; first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
...@@ -2902,8 +2903,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2902,8 +2903,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
count = i40e_xmit_descriptor_count(skb); count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) { if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) if (__skb_linearize(skb)) {
goto out_drop; dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
count = i40e_txd_use_count(skb->len); count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++; tx_ring->tx_stats.tx_linearize++;
} }
...@@ -2919,6 +2922,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2919,6 +2922,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
/* prepare the xmit flags */ /* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop; goto out_drop;
...@@ -2926,16 +2935,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2926,16 +2935,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */ /* obtain protocol of skb */
protocol = vlan_get_protocol(skb); protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */ /* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP)) if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4; tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6)) else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6; tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
...@@ -2973,7 +2979,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2973,7 +2979,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
out_drop: out_drop:
dev_kfree_skb_any(skb); dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -52,7 +52,20 @@ ...@@ -52,7 +52,20 @@
*/ */
#define INTRL_ENA BIT(6) #define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) /**
* i40e_intrl_usec_to_reg - convert interrupt rate limit to register
* @intrl: interrupt rate limit to convert
*
* This function converts a decimal interrupt rate limit to the appropriate
* register format expected by the firmware when setting interrupt rate limit.
*/
/* Convert a microsecond interrupt rate limit into the register
 * encoding expected by the firmware: the value scaled down by four
 * with the enable bit set, or zero when the limit rounds to nothing.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	int reg = intrl >> 2;

	/* kept as int so truncation to u16 happens only at return,
	 * exactly as in the original open-coded form
	 */
	return reg ? (reg | INTRL_ENA) : 0;
}
#define I40E_INTRL_8K 125 /* 8000 ints/sec */ #define I40E_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */ #define I40E_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_INTRL_83K 12 /* 83333 ints/sec */
......
...@@ -125,7 +125,6 @@ enum i40e_debug_mask { ...@@ -125,7 +125,6 @@ enum i40e_debug_mask {
*/ */
enum i40e_mac_type { enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0, I40E_MAC_UNKNOWN = 0,
I40E_MAC_X710,
I40E_MAC_XL710, I40E_MAC_XL710,
I40E_MAC_VF, I40E_MAC_VF,
I40E_MAC_X722, I40E_MAC_X722,
......
...@@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) ...@@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) { if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
f = i40e_add_filter(vsi, vf->default_lan_addr.addr, f = i40e_add_mac_filter(vsi,
vf->port_vlan_id ? vf->default_lan_addr.addr);
vf->port_vlan_id : -1);
if (!f) if (!f)
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n", "Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id); vf->default_lan_addr.addr, vf->vf_id);
} }
eth_broadcast_addr(broadcast); eth_broadcast_addr(broadcast);
f = i40e_add_filter(vsi, broadcast, f = i40e_add_mac_filter(vsi, broadcast);
vf->port_vlan_id ? vf->port_vlan_id : -1);
if (!f) if (!f)
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n"); "Could not allocate VF broadcast filter\n");
...@@ -1942,12 +1940,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1942,12 +1940,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr); f = i40e_find_mac(vsi, al->list[i].addr);
if (!f) { if (!f)
if (i40e_is_vsi_in_vlan(vsi)) f = i40e_add_mac_filter(vsi, al->list[i].addr);
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
else
f = i40e_add_filter(vsi, al->list[i].addr, -1);
}
if (!f) { if (!f) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
...@@ -2012,7 +2006,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2012,7 +2006,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */ /* delete addresses from the list */
for (i = 0; i < al->num_elements; i++) for (i = 0; i < al->num_elements; i++)
if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) { if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR; ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param; goto error_param;
...@@ -2722,14 +2716,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2722,14 +2716,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* delete the temporary mac address */ /* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr)) if (!is_zero_ether_addr(vf->default_lan_addr.addr))
i40e_del_filter(vsi, vf->default_lan_addr.addr, i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
vf->port_vlan_id ? vf->port_vlan_id : -1);
/* Delete all the filters for this VSI - we're going to kill it /* Delete all the filters for this VSI - we're going to kill it
* anyway. * anyway.
*/ */
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
i40e_del_filter(vsi, f->macaddr, f->vlan); __i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
......
...@@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) ...@@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722; hw->mac.type = I40E_MAC_X722;
break; break;
case I40E_DEV_ID_X722_VF: case I40E_DEV_ID_X722_VF:
case I40E_DEV_ID_X722_VF_HV:
hw->mac.type = I40E_MAC_X722_VF; hw->mac.type = I40E_MAC_X722_VF;
break; break;
case I40E_DEV_ID_VF: case I40E_DEV_ID_VF:
......
...@@ -48,7 +48,6 @@ ...@@ -48,7 +48,6 @@
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \ (d) == I40E_DEV_ID_QSFP_B || \
......
...@@ -1549,14 +1549,16 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, ...@@ -1549,14 +1549,16 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
/** /**
* i40e_tso - set up the tso context descriptor * i40e_tso - set up the tso context descriptor
* @skb: ptr to the skb we're sending * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header * @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1 * @cd_type_cmd_tso_mss: Quad Word 1
* *
* Returns 0 if no TSO can happen, 1 if tso is going, or error * Returns 0 if no TSO can happen, 1 if tso is going, or error
**/ **/
static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
u64 *cd_type_cmd_tso_mss)
{ {
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss; u64 cd_cmd, cd_tso_len, cd_mss;
union { union {
struct iphdr *v4; struct iphdr *v4;
...@@ -1569,6 +1571,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) ...@@ -1569,6 +1571,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr; unsigned char *hdr;
} l4; } l4;
u32 paylen, l4_offset; u32 paylen, l4_offset;
u16 gso_segs, gso_size;
int err; int err;
if (skb->ip_summed != CHECKSUM_PARTIAL) if (skb->ip_summed != CHECKSUM_PARTIAL)
...@@ -1633,10 +1636,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) ...@@ -1633,10 +1636,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* compute length of segmentation header */ /* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset; *hdr_len = (l4.tcp->doff * 4) + l4_offset;
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
first->gso_segs = gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */ /* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO; cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len; cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size; cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
...@@ -1949,7 +1960,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1949,7 +1960,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
u16 gso_segs;
u16 desc_count = 1; u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
...@@ -1958,15 +1968,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1958,15 +1968,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT; I40E_TX_FLAGS_VLAN_SHIFT;
} }
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
gso_segs = skb_shinfo(skb)->gso_segs;
else
gso_segs = 1;
/* multiply data chunks by size of headers */
first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
first->gso_segs = gso_segs;
first->skb = skb;
first->tx_flags = tx_flags; first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
...@@ -2151,8 +2152,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2151,8 +2152,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
count = i40e_xmit_descriptor_count(skb); count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) { if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) if (__skb_linearize(skb)) {
goto out_drop; dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
count = i40e_txd_use_count(skb->len); count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++; tx_ring->tx_stats.tx_linearize++;
} }
...@@ -2168,6 +2171,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2168,6 +2171,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
/* prepare the xmit flags */ /* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop; goto out_drop;
...@@ -2175,16 +2184,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2175,16 +2184,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */ /* obtain protocol of skb */
protocol = vlan_get_protocol(skb); protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */ /* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP)) if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4; tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6)) else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6; tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
...@@ -2211,7 +2217,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2211,7 +2217,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
out_drop: out_drop:
dev_kfree_skb_any(skb); dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -100,7 +100,6 @@ enum i40e_debug_mask { ...@@ -100,7 +100,6 @@ enum i40e_debug_mask {
*/ */
enum i40e_mac_type { enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0, I40E_MAC_UNKNOWN = 0,
I40E_MAC_X710,
I40E_MAC_XL710, I40E_MAC_XL710,
I40E_MAC_VF, I40E_MAC_VF,
I40E_MAC_X722, I40E_MAC_X722,
......
...@@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = { ...@@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
/* required last entry */ /* required last entry */
{0, } {0, }
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment