Commit 8dd5b698 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-04-30

This series contains updates to i40e and i40evf only.

Jake provides the majority of the changes in this series, starting
with the renaming of a flag to avoid confusion, followed by renaming a
variable to a more meaningful name that clarifies what is actually
being done.  He amortizes the wait time when initializing or disabling
lots of VFs by using i40e_reset_all_vfs() and
i40e_vsi_stop_rings_no_wait(), and removes an unnecessary delay when
removing VFs, since pci_disable_sriov() already has its own delay and
no additional delay is needed.  He also stops using the same flag
names for both vsi->state and pf->state, which makes code review
easier and helps future work use the correct state field when checking
bits.  Uses DECLARE_BITMAP() to ensure that we always allocate enough
space for the state flags.  Replaces hw_disabled_flags with the new
*_AUTO_DISABLED flags, which are more readable because we are no
longer setting an *_ENABLED flag to disable the feature.
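
As a rough illustration of the new state handling (a minimal sketch
with made-up example_* names, not the driver code itself): sizing the
bitmap with a trailing *_SIZE__ enumerator keeps it large enough as
flags are added, and because the field is now an array it is passed to
the bit helpers directly instead of by address.

  #include <linux/types.h>
  #include <linux/bitops.h>

  enum example_pf_state {
          __EX_DOWN,
          __EX_CONFIG_BUSY,
          /* must be last, it determines the size of the bitmap */
          __EX_STATE_SIZE__,
  };

  struct example_pf {
          DECLARE_BITMAP(state, __EX_STATE_SIZE__);
  };

  static bool example_pf_busy(struct example_pf *pf)
  {
          /* pf->state decays to unsigned long *, so no leading '&' */
          return test_bit(__EX_CONFIG_BUSY, pf->state);
  }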

Alex corrects an oversight where we were not reprogramming the UDP
tunnel ports after a reset, which was causing us to lose all of the
receive tunnel offloads.
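
The fix adds an i40e_sync_udp_filters() helper (shown in the diff
below) that marks every configured UDP tunnel port as pending again so
the existing sync subtask reprograms it into the hardware after the
reset; roughly:

  static void i40e_sync_udp_filters(struct i40e_pf *pf)
  {
          int i;

          /* loop through and set pending bit for all active UDP filters */
          for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
                  if (pf->udp_ports[i].port)
                          pf->pending_udp_bitmap |= BIT_ULL(i);
          }

          pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
  }

The sync subtask then re-issues i40e_aq_add_udp_tunnel() for each
pending port.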

Arnd Bergmann moves the declaration of a local variable to avoid an
unused-variable warning seen on architectures with larger page sizes.
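
A generic illustration of the pattern (hypothetical example_* code,
not the actual patch): declaring the variable in the only scope that
uses it means configurations that compile that scope out, for example
because of a larger PAGE_SIZE, no longer see an unused variable.

  #include <linux/mm.h>
  #include <linux/slab.h>

  struct example_ring {
          void *buffer;
          unsigned int next;
  };

  static void example_release(struct example_ring *ring)
  {
  #if (PAGE_SIZE < 8192)
          /* only referenced in this block, so declare it here */
          void *buf = ring->buffer;

          kfree(buf);
  #endif
          ring->next = 0;
  }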
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5b36d8f5 3dfc3eb5
@@ -125,7 +125,6 @@ enum i40e_state_t {
 	__I40E_CONFIG_BUSY,
 	__I40E_CONFIG_DONE,
 	__I40E_DOWN,
-	__I40E_NEEDS_RESTART,
 	__I40E_SERVICE_SCHED,
 	__I40E_ADMINQ_EVENT_PENDING,
 	__I40E_MDD_EVENT_PENDING,
@@ -138,7 +137,6 @@ enum i40e_state_t {
 	__I40E_GLOBAL_RESET_REQUESTED,
 	__I40E_EMP_RESET_REQUESTED,
 	__I40E_EMP_RESET_INTR_RECEIVED,
-	__I40E_FILTER_OVERFLOW_PROMISC,
 	__I40E_SUSPENDED,
 	__I40E_PTP_TX_IN_PROGRESS,
 	__I40E_BAD_EEPROM,
@@ -147,6 +145,20 @@ enum i40e_state_t {
 	__I40E_RESET_FAILED,
 	__I40E_PORT_SUSPENDED,
 	__I40E_VF_DISABLE,
+	/* This must be last as it determines the size of the BITMAP */
+	__I40E_STATE_SIZE__,
+};
+
+/* VSI state flags */
+enum i40e_vsi_state_t {
+	__I40E_VSI_DOWN,
+	__I40E_VSI_NEEDS_RESTART,
+	__I40E_VSI_SYNCING_FILTERS,
+	__I40E_VSI_OVERFLOW_PROMISC,
+	__I40E_VSI_REINIT_REQUESTED,
+	__I40E_VSI_DOWN_REQUESTED,
+	/* This must be last as it determines the size of the BITMAP */
+	__I40E_VSI_STATE_SIZE__,
 };

 enum i40e_interrupt_policy {
@@ -245,7 +257,7 @@ struct i40e_tc_configuration {
 struct i40e_udp_port_config {
 	/* AdminQ command interface expects port number in Host byte order */
-	u16 index;
+	u16 port;
 	u8 type;
 };
@@ -322,7 +334,7 @@ struct i40e_flex_pit {
 struct i40e_pf {
 	struct pci_dev *pdev;
 	struct i40e_hw hw;
-	unsigned long state;
+	DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
 	struct msix_entry *msix_entries;
 	bool fc_autoneg_status;
@@ -396,6 +408,8 @@ struct i40e_pf {
 #define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
 #define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
 #define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT_ULL(23)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT_ULL(24)
 #define I40E_FLAG_PTP				BIT_ULL(25)
 #define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
 #define I40E_FLAG_UDP_FILTER_SYNC		BIT_ULL(27)
@@ -428,13 +442,6 @@ struct i40e_pf {
 #define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE		BIT_ULL(57)
 #define I40E_FLAG_LEGACY_RX			BIT_ULL(58)
-	/* Tracks features that are disabled due to hw limitations.
-	 * If a bit is set here, it means that the corresponding
-	 * bit in the 'flags' field is cleared i.e that feature
-	 * is disabled
-	 */
-	u64 hw_disabled_flags;
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
 	struct i40e_hw_port_stats stats;
@@ -593,7 +600,7 @@ struct i40e_vsi {
 	bool stat_offsets_loaded;
 	u32 current_netdev_flags;
-	unsigned long state;
+	DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
 #define I40E_VSI_FLAG_FILTER_CHANGED	BIT(0)
 #define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
 	unsigned long flags;
......
@@ -371,8 +371,8 @@ void i40e_client_subtask(struct i40e_pf *pf)
 	cdev = pf->cinst;
 	/* If we're down or resetting, just bail */
-	if (test_bit(__I40E_DOWN, &pf->state) ||
-	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
+	if (test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return;
 	if (!client || !cdev)
@@ -382,7 +382,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
 	 * the netdev is up, then open the client.
 	 */
 	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
-		if (!test_bit(__I40E_DOWN, &vsi->state) &&
+		if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
 		    client->ops && client->ops->open) {
 			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
 			ret = client->ops->open(&cdev->lan_info, client);
@@ -397,7 +397,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
 	/* Likewise for client close. If the client is up, but the netdev
 	 * is down, then close the client.
 	 */
-	if (test_bit(__I40E_DOWN, &vsi->state) &&
+	if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
 	    client->ops && client->ops->close) {
 		clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
 		client->ops->close(&cdev->lan_info, client, false);
@@ -503,7 +503,7 @@ static void i40e_client_release(struct i40e_client *client)
 			continue;
 		while (test_and_set_bit(__I40E_SERVICE_SCHED,
-					&pf->state))
+					pf->state))
 			usleep_range(500, 1000);
 		if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
@@ -521,7 +521,7 @@ static void i40e_client_release(struct i40e_client *client)
 		i40e_client_del_instance(pf);
 		dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
 			 client->name);
-		clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+		clear_bit(__I40E_SERVICE_SCHED, pf->state);
 	}
 	mutex_unlock(&i40e_device_mutex);
 }
@@ -661,10 +661,10 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
 	switch (reset_level) {
 	case I40E_CLIENT_RESET_LEVEL_PF:
-		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 		break;
 	case I40E_CLIENT_RESET_LEVEL_CORE:
-		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 		break;
 	default:
 		dev_warn(&pf->pdev->dev,
......
@@ -158,9 +158,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 	dev_info(&pf->pdev->dev,
 		 " vlgrp: & = %p\n", vsi->active_vlans);
 	dev_info(&pf->pdev->dev,
-		 " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
-		 vsi->state, vsi->flags,
-		 vsi->netdev_registered, vsi->current_netdev_flags);
+		 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+		 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
+	for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
+		dev_info(&pf->pdev->dev,
+			 " state[%d] = %08lx\n",
+			 i, vsi->state[i]);
 	if (vsi == pf->vsi[pf->lan_vsi])
 		dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
 			 pf->hw.mac.addr,
@@ -174,7 +177,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 	}
 	dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
 		 vsi->active_filters, vsi->promisc_threshold,
-		 (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
+		 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
 		  "ON" : "OFF"));
 	nstat = i40e_get_vsi_stats_struct(vsi);
 	dev_info(&pf->pdev->dev,
@@ -1706,7 +1709,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
 	} else if (!vsi->netdev) {
 		dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
 			 vsi_seid);
-	} else if (test_bit(__I40E_DOWN, &vsi->state)) {
+	} else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
 		dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
 			 vsi_seid);
 	} else if (rtnl_trylock()) {
......
@@ -757,7 +757,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 	if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
 		return -EOPNOTSUPP;
-	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) {
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
 		timeout--;
 		if (!timeout)
 			return -EBUSY;
@@ -891,7 +891,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 	}
 done:
-	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+	clear_bit(__I40E_CONFIG_BUSY, pf->state);
 	return err;
 }
@@ -987,7 +987,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	}
 	/* If we have link and don't have autoneg */
-	if (!test_bit(__I40E_DOWN, &pf->state) &&
+	if (!test_bit(__I40E_DOWN, pf->state) &&
 	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
 		/* Send message that it might not necessarily work*/
 		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
@@ -1039,10 +1039,10 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 		err = -EAGAIN;
 	}
-	if (!test_bit(__I40E_DOWN, &pf->state)) {
+	if (!test_bit(__I40E_DOWN, pf->state)) {
 		/* Give it a little more time to try to come back */
 		msleep(75);
-		if (!test_bit(__I40E_DOWN, &pf->state))
+		if (!test_bit(__I40E_DOWN, pf->state))
 			return i40e_nway_reset(netdev);
 	}
@@ -1139,8 +1139,8 @@ static int i40e_get_eeprom(struct net_device *netdev,
 		/* make sure it is the right magic for NVMUpdate */
 		if ((eeprom->magic >> 16) != hw->device_id)
 			errno = -EINVAL;
-		else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
-			 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+		else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+			 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
 			errno = -EBUSY;
 		else
 			ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
@@ -1246,8 +1246,8 @@ static int i40e_set_eeprom(struct net_device *netdev,
 	/* check for NVMUpdate access method */
 	else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
 		errno = -EINVAL;
-	else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
-		 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+	else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+		 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
 		errno = -EBUSY;
 	else
 		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
@@ -1332,7 +1332,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	    (new_rx_count == vsi->rx_rings[0]->count))
 		return 0;
-	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) {
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
 		timeout--;
 		if (!timeout)
 			return -EBUSY;
@@ -1485,7 +1485,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	}
 done:
-	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+	clear_bit(__I40E_CONFIG_BUSY, pf->state);
 	return err;
 }
@@ -1826,7 +1826,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
 	int i;
 	for (i = 0; i < pf->num_alloc_vfs; i++)
-		if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+		if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states))
 			return true;
 	return false;
 }
@@ -1847,7 +1847,7 @@ static void i40e_diag_test(struct net_device *netdev,
 		/* Offline tests */
 		netif_info(pf, drv, netdev, "offline testing starting\n");
-		set_bit(__I40E_TESTING, &pf->state);
+		set_bit(__I40E_TESTING, pf->state);
 		if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
 			dev_warn(&pf->pdev->dev,
@@ -1857,7 +1857,7 @@ static void i40e_diag_test(struct net_device *netdev,
 			data[I40E_ETH_TEST_INTR] = 1;
 			data[I40E_ETH_TEST_LINK] = 1;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
-			clear_bit(__I40E_TESTING, &pf->state);
+			clear_bit(__I40E_TESTING, pf->state);
 			goto skip_ol_tests;
 		}
@@ -1886,7 +1886,7 @@ static void i40e_diag_test(struct net_device *netdev,
 		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
-		clear_bit(__I40E_TESTING, &pf->state);
+		clear_bit(__I40E_TESTING, pf->state);
 		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 		if (if_running)
@@ -2924,11 +2924,11 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
 	struct i40e_pf *pf = vsi->back;
 	int ret = 0;
-	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
-	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
 		return -EBUSY;
-	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
 		return -EBUSY;
 	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
@@ -3643,14 +3643,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return -EOPNOTSUPP;
-	if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
+	if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
 		return -ENOSPC;
-	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
-	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
 		return -EBUSY;
-	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
 		return -EBUSY;
 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -4086,12 +4086,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	/* Flush current ATR settings if ATR was disabled */
 	if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
 	    !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
-		pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
-		set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+		set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
 	}
 	/* Only allow ATR evict on hardware that is capable of handling it */
-	if (pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
 		pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
 	if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
......
...@@ -47,7 +47,7 @@ static const char i40e_driver_string[] = ...@@ -47,7 +47,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 2 #define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1 #define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 7 #define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN __stringify(DRV_VERSION_BUILD) DRV_KERN
...@@ -295,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) ...@@ -295,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/ **/
void i40e_service_event_schedule(struct i40e_pf *pf) void i40e_service_event_schedule(struct i40e_pf *pf)
{ {
if (!test_bit(__I40E_DOWN, &pf->state) && if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
queue_work(i40e_wq, &pf->service_task); queue_work(i40e_wq, &pf->service_task);
} }
...@@ -377,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev) ...@@ -377,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev)
switch (pf->tx_timeout_recovery_level) { switch (pf->tx_timeout_recovery_level) {
case 1: case 1:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break; break;
case 2: case 2:
set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
break; break;
case 3: case 3:
set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break; break;
default: default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
...@@ -422,7 +422,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, ...@@ -422,7 +422,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i; int i;
if (test_bit(__I40E_DOWN, &vsi->state)) if (test_bit(__I40E_VSI_DOWN, vsi->state))
return; return;
if (!vsi->tx_rings) if (!vsi->tx_rings)
...@@ -753,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) ...@@ -753,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u64 tx_p, tx_b; u64 tx_p, tx_b;
u16 q; u16 q;
if (test_bit(__I40E_DOWN, &vsi->state) || if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
test_bit(__I40E_CONFIG_BUSY, &pf->state)) test_bit(__I40E_CONFIG_BUSY, pf->state))
return; return;
ns = i40e_get_vsi_stats_struct(vsi); ns = i40e_get_vsi_stats_struct(vsi);
...@@ -1050,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) ...@@ -1050,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count); &osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED && if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
!(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
nsd->fd_sb_status = true; nsd->fd_sb_status = true;
else else
nsd->fd_sb_status = false; nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
!(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
nsd->fd_atr_status = true; nsd->fd_atr_status = true;
else else
nsd->fd_atr_status = false; nsd->fd_atr_status = false;
...@@ -1346,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ...@@ -1346,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* to failed, so we don't bother to try sending the filter * to failed, so we don't bother to try sending the filter
* to the hardware. * to the hardware.
*/ */
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
f->state = I40E_FILTER_FAILED; f->state = I40E_FILTER_FAILED;
else else
f->state = I40E_FILTER_NEW; f->state = I40E_FILTER_NEW;
...@@ -1525,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ...@@ -1525,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return 0; return 0;
} }
if (test_bit(__I40E_DOWN, &vsi->back->state) || if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
if (ether_addr_equal(hw->mac.addr, addr->sa_data)) if (ether_addr_equal(hw->mac.addr, addr->sa_data))
...@@ -1920,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, ...@@ -1920,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (fcnt != num_add) { if (fcnt != num_add) {
*promisc_changed = true; *promisc_changed = true;
set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev, dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n", "Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err), i40e_aq_str(hw, aq_err),
...@@ -2003,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2003,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_add_macvlan_element_data *add_list;
struct i40e_aqc_remove_macvlan_element_data *del_list; struct i40e_aqc_remove_macvlan_element_data *del_list;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
usleep_range(1000, 2000); usleep_range(1000, 2000);
pf = vsi->back; pf = vsi->back;
...@@ -2139,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2139,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_add = 0; num_add = 0;
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
&vsi->state)) { vsi->state)) {
new->state = I40E_FILTER_FAILED; new->state = I40E_FILTER_FAILED;
continue; continue;
} }
...@@ -2227,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2227,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* safely exit if we didn't just enter, we no longer have any failed * safely exit if we didn't just enter, we no longer have any failed
* filters, and we have reduced filters below the threshold value. * filters, and we have reduced filters below the threshold value.
*/ */
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
!promisc_changed && !failed_filters && !promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) { (vsi->active_filters < vsi->promisc_threshold)) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n", "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
vsi_name); vsi_name);
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
promisc_changed = true; promisc_changed = true;
vsi->promisc_threshold = 0; vsi->promisc_threshold = 0;
} }
/* if the VF is not trusted do not do promisc */ /* if the VF is not trusted do not do promisc */
if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
goto out; goto out;
} }
...@@ -2265,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2265,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
} }
if ((changed_flags & IFF_PROMISC) || if ((changed_flags & IFF_PROMISC) ||
(promisc_changed && (promisc_changed &&
test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) { test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
bool cur_promisc; bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC, test_bit(__I40E_VSI_OVERFLOW_PROMISC,
&vsi->state)); vsi->state));
if ((vsi->type == I40E_VSI_MAIN) && if ((vsi->type == I40E_VSI_MAIN) &&
(pf->lan_veb != I40E_NO_VEB) && (pf->lan_veb != I40E_NO_VEB) &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) { !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
...@@ -2353,7 +2353,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2353,7 +2353,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (retval) if (retval)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
clear_bit(__I40E_CONFIG_BUSY, &vsi->state); clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return retval; return retval;
err_no_memory: err_no_memory:
...@@ -2365,7 +2365,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2365,7 +2365,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
clear_bit(__I40E_CONFIG_BUSY, &vsi->state); clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return -ENOMEM; return -ENOMEM;
} }
...@@ -3611,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data) ...@@ -3611,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data)
* this is not a performance path and napi_schedule() * this is not a performance path and napi_schedule()
* can deal with rescheduling. * can deal with rescheduling.
*/ */
if (!test_bit(__I40E_DOWN, &pf->state)) if (!test_bit(__I40E_VSI_DOWN, pf->state))
napi_schedule_irqoff(&q_vector->napi); napi_schedule_irqoff(&q_vector->napi);
} }
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
} }
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
} }
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
} }
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT); val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
...@@ -3644,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data) ...@@ -3644,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++; pf->globr_count++;
} else if (val == I40E_RESET_EMPR) { } else if (val == I40E_RESET_EMPR) {
pf->empr_count++; pf->empr_count++;
set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
} }
} }
...@@ -3677,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data) ...@@ -3677,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n"); dev_info(&pf->pdev->dev, "device will be reset\n");
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} }
ena_mask &= ~icr0_remaining; ena_mask &= ~icr0_remaining;
...@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data) ...@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr: enable_intr:
/* re-enable interrupt causes */ /* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) { if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf, false); i40e_irq_dynamic_enable_icr0(pf, false);
} }
...@@ -3907,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev) ...@@ -3907,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev)
int i; int i;
/* if interface is down do nothing */ /* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &vsi->state)) if (test_bit(__I40E_VSI_DOWN, vsi->state))
return; return;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
...@@ -4144,7 +4144,7 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) ...@@ -4144,7 +4144,7 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi) void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{ {
/* When port TX is suspended, don't wait */ /* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, &vsi->back->state)) if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi); return i40e_vsi_stop_rings_no_wait(vsi);
/* do rx first for enable and last for disable /* do rx first for enable and last for disable
...@@ -4436,14 +4436,14 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) ...@@ -4436,14 +4436,14 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
static void i40e_vsi_close(struct i40e_vsi *vsi) static void i40e_vsi_close(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
i40e_down(vsi); i40e_down(vsi);
i40e_vsi_free_irq(vsi); i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi); i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0; vsi->current_netdev_flags = 0;
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
pf->flags |= I40E_FLAG_CLIENT_RESET; pf->flags |= I40E_FLAG_CLIENT_RESET;
} }
...@@ -4453,10 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) ...@@ -4453,10 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
**/ **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi) static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{ {
if (test_bit(__I40E_DOWN, &vsi->state)) if (test_bit(__I40E_VSI_DOWN, vsi->state))
return; return;
set_bit(__I40E_NEEDS_RESTART, &vsi->state); set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev)) if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
else else
...@@ -4469,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) ...@@ -4469,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
**/ **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{ {
if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
return; return;
clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev)) if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev); vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else else
...@@ -4638,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) ...@@ -4638,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
return; return;
/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */ /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
if (test_bit(__I40E_DOWN, &vsi->back->state) || if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return; return;
/* Make sure type is MAIN VSI */ /* Make sure type is MAIN VSI */
...@@ -5186,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) ...@@ -5186,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
i40e_stat_str(&pf->hw, ret), i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */ /* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} }
...@@ -5354,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) ...@@ -5354,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if (err) if (err)
return err; return err;
clear_bit(__I40E_DOWN, &vsi->state); clear_bit(__I40E_VSI_DOWN, vsi->state);
i40e_napi_enable_all(vsi); i40e_napi_enable_all(vsi);
i40e_vsi_enable_irq(vsi); i40e_vsi_enable_irq(vsi);
...@@ -5403,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) ...@@ -5403,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000); usleep_range(1000, 2000);
i40e_down(vsi); i40e_down(vsi);
i40e_up(vsi); i40e_up(vsi);
clear_bit(__I40E_CONFIG_BUSY, &pf->state); clear_bit(__I40E_CONFIG_BUSY, pf->state);
} }
/** /**
...@@ -5435,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi) ...@@ -5435,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi)
int i; int i;
/* It is assumed that the caller of this function /* It is assumed that the caller of this function
* sets the vsi->state __I40E_DOWN bit. * sets the vsi->state __I40E_VSI_DOWN bit.
*/ */
if (vsi->netdev) { if (vsi->netdev) {
netif_carrier_off(vsi->netdev); netif_carrier_off(vsi->netdev);
...@@ -5541,8 +5540,8 @@ int i40e_open(struct net_device *netdev) ...@@ -5541,8 +5540,8 @@ int i40e_open(struct net_device *netdev)
int err; int err;
/* disallow open during test or if eeprom is broken */ /* disallow open during test or if eeprom is broken */
if (test_bit(__I40E_TESTING, &pf->state) || if (test_bit(__I40E_TESTING, pf->state) ||
test_bit(__I40E_BAD_EEPROM, &pf->state)) test_bit(__I40E_BAD_EEPROM, pf->state))
return -EBUSY; return -EBUSY;
netif_carrier_off(netdev); netif_carrier_off(netdev);
...@@ -5787,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) ...@@ -5787,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
struct i40e_vsi *vsi = pf->vsi[v]; struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL && if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
vsi->state))
i40e_vsi_reinit_locked(pf->vsi[v]); i40e_vsi_reinit_locked(pf->vsi[v]);
clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
}
} }
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v; int v;
...@@ -5801,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) ...@@ -5801,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
struct i40e_vsi *vsi = pf->vsi[v]; struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL && if (vsi != NULL &&
test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
set_bit(__I40E_DOWN, &vsi->state); vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi); i40e_down(vsi);
clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
} }
} }
} else { } else {
...@@ -5944,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ...@@ -5944,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
else else
pf->flags &= ~I40E_FLAG_DCB_ENABLED; pf->flags &= ~I40E_FLAG_DCB_ENABLED;
set_bit(__I40E_PORT_SUSPENDED, &pf->state); set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */ /* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf); i40e_pf_quiesce_all_vsi(pf);
...@@ -5953,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ...@@ -5953,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_resume_port_tx(pf); ret = i40e_resume_port_tx(pf);
clear_bit(__I40E_PORT_SUSPENDED, &pf->state); clear_bit(__I40E_PORT_SUSPENDED, pf->state);
/* In case of error no point in resuming VSIs */ /* In case of error no point in resuming VSIs */
if (ret) if (ret)
goto exit; goto exit;
...@@ -5962,7 +5960,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ...@@ -5962,7 +5960,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_pf_wait_queues_disabled(pf); ret = i40e_pf_wait_queues_disabled(pf);
if (ret) { if (ret) {
/* Schedule PF reset to recover */ /* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} else { } else {
i40e_pf_unquiesce_all_vsi(pf); i40e_pf_unquiesce_all_vsi(pf);
...@@ -6077,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) ...@@ -6077,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
u32 fcnt_prog, fcnt_avail; u32 fcnt_prog, fcnt_avail;
struct hlist_node *node; struct hlist_node *node;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return; return;
/* Check if, FD SB or ATR was auto disabled and if there is enough room /* Check if we have enough room to re-enable FDir SB capability. */
* to re-enable
*/
fcnt_prog = i40e_get_global_fd_count(pf); fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count; fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) || (pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED; if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
if (I40E_DEBUG_FD & pf->hw.debug_mask) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
} }
} }
/* Wait for some more space to be available to turn on ATR. We also /* We should wait for even more space before re-enabling ATR.
* must check that no existing ntuple rules for TCP are in effect * Additionally, we cannot enable ATR as long as we still have TCP SB
* rules active.
*/ */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->fd_tcp4_filter_cnt == 0)) {
(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) && if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
(pf->fd_tcp4_filter_cnt == 0)) { pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
if (I40E_DEBUG_FD & pf->hw.debug_mask) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
} }
} }
...@@ -6155,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) ...@@ -6155,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} }
pf->fd_flush_timestamp = jiffies; pf->fd_flush_timestamp = jiffies;
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
/* flush all filters */ /* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1, wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
...@@ -6175,8 +6172,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) ...@@ -6175,8 +6172,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */ /* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr && !pf->fd_tcp4_filter_cnt) if (!disable_atr && !pf->fd_tcp4_filter_cnt)
pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask) if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
} }
...@@ -6206,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) ...@@ -6206,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{ {
/* if interface is down do nothing */ /* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state)) if (test_bit(__I40E_VSI_DOWN, pf->state))
return; return;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
i40e_fdir_flush_and_replay(pf); i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf); i40e_fdir_check_and_reenable(pf);
...@@ -6223,7 +6220,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) ...@@ -6223,7 +6220,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
**/ **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{ {
if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
return; return;
switch (vsi->type) { switch (vsi->type) {
...@@ -6316,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf) ...@@ -6316,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link && if (new_link == old_link &&
new_link_speed == old_link_speed && new_link_speed == old_link_speed &&
(test_bit(__I40E_DOWN, &vsi->state) || (test_bit(__I40E_VSI_DOWN, vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev))) new_link == netif_carrier_ok(vsi->netdev)))
return; return;
if (!test_bit(__I40E_DOWN, &vsi->state)) if (!test_bit(__I40E_VSI_DOWN, vsi->state))
i40e_print_link_message(vsi, new_link); i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to /* Notify the base of the switch tree connected to
...@@ -6347,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) ...@@ -6347,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
int i; int i;
/* if interface is down do nothing */ /* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state) || if (test_bit(__I40E_VSI_DOWN, pf->state) ||
test_bit(__I40E_CONFIG_BUSY, &pf->state)) test_bit(__I40E_CONFIG_BUSY, pf->state))
return; return;
/* make sure we don't do these things too often */ /* make sure we don't do these things too often */
...@@ -6386,31 +6383,31 @@ static void i40e_reset_subtask(struct i40e_pf *pf) ...@@ -6386,31 +6383,31 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{ {
u32 reset_flags = 0; u32 reset_flags = 0;
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_REINIT_REQUESTED); reset_flags |= BIT(__I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state); clear_bit(__I40E_REINIT_REQUESTED, pf->state);
} }
if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
} }
if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
} }
if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
} }
if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_DOWN_REQUESTED); reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
clear_bit(__I40E_DOWN_REQUESTED, &pf->state); clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
} }
/* If there's a recovery already waiting, it takes /* If there's a recovery already waiting, it takes
* precedence before starting a new reset sequence. * precedence before starting a new reset sequence.
*/ */
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
i40e_prep_for_reset(pf, false); i40e_prep_for_reset(pf, false);
i40e_reset(pf); i40e_reset(pf);
i40e_rebuild(pf, false, false); i40e_rebuild(pf, false, false);
...@@ -6418,8 +6415,8 @@ static void i40e_reset_subtask(struct i40e_pf *pf) ...@@ -6418,8 +6415,8 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
/* If we're already down or resetting, just bail */ /* If we're already down or resetting, just bail */
if (reset_flags && if (reset_flags &&
!test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_VSI_DOWN, pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state)) { !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
rtnl_lock(); rtnl_lock();
i40e_do_reset(pf, reset_flags, true); i40e_do_reset(pf, reset_flags, true);
rtnl_unlock(); rtnl_unlock();
...@@ -6468,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) ...@@ -6468,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u32 val; u32 val;
/* Do not run clean AQ when PF reset fails */ /* Do not run clean AQ when PF reset fails */
if (test_bit(__I40E_RESET_FAILED, &pf->state)) if (test_bit(__I40E_RESET_FAILED, pf->state))
return; return;
/* check for error indications */ /* check for error indications */
...@@ -6572,7 +6569,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) ...@@ -6572,7 +6569,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
} while (i++ < pf->adminq_work_limit); } while (i++ < pf->adminq_work_limit);
if (i < pf->adminq_work_limit) if (i < pf->adminq_work_limit)
clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
/* re-enable Admin queue interrupt cause */ /* re-enable Admin queue interrupt cause */
val = rd32(hw, I40E_PFINT_ICR0_ENA); val = rd32(hw, I40E_PFINT_ICR0_ENA);
...@@ -6598,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) ...@@ -6598,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
if (err) { if (err) {
dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
err); err);
set_bit(__I40E_BAD_EEPROM, &pf->state); set_bit(__I40E_BAD_EEPROM, pf->state);
} }
} }
if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
clear_bit(__I40E_BAD_EEPROM, &pf->state); clear_bit(__I40E_BAD_EEPROM, pf->state);
} }
} }
...@@ -6922,8 +6919,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) ...@@ -6922,8 +6919,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
i40e_status ret = 0; i40e_status ret = 0;
u32 v; u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return; return;
if (i40e_check_asq_alive(&pf->hw)) if (i40e_check_asq_alive(&pf->hw))
i40e_vc_notify_reset(pf); i40e_vc_notify_reset(pf);
...@@ -6982,8 +6979,8 @@ static int i40e_reset(struct i40e_pf *pf) ...@@ -6982,8 +6979,8 @@ static int i40e_reset(struct i40e_pf *pf)
ret = i40e_pf_reset(hw); ret = i40e_pf_reset(hw);
if (ret) { if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
set_bit(__I40E_RESET_FAILED, &pf->state); set_bit(__I40E_RESET_FAILED, pf->state);
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
} else { } else {
pf->pfr_count++; pf->pfr_count++;
} }
...@@ -7005,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7005,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
u32 val; u32 val;
int v; int v;
if (test_bit(__I40E_DOWN, &pf->state)) if (test_bit(__I40E_VSI_DOWN, pf->state))
goto clear_recovery; goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
...@@ -7019,7 +7016,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7019,7 +7016,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
} }
/* re-verify the eeprom if we just had an EMP reset */ /* re-verify the eeprom if we just had an EMP reset */
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf); i40e_verify_eeprom(pf);
i40e_clear_pxe_mode(hw); i40e_clear_pxe_mode(hw);
...@@ -7182,9 +7179,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ...@@ -7182,9 +7179,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (!lock_acquired) if (!lock_acquired)
rtnl_unlock(); rtnl_unlock();
end_core_reset: end_core_reset:
clear_bit(__I40E_RESET_FAILED, &pf->state); clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery: clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
} }
/** /**
...@@ -7237,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) ...@@ -7237,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg; u32 reg;
int i; int i;
if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
return; return;
/* find what triggered the MDD event */ /* find what triggered the MDD event */
...@@ -7289,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) ...@@ -7289,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
} }
/* Queue belongs to the PF, initiate a reset */ /* Queue belongs to the PF, initiate a reset */
if (pf_mdd_detected) { if (pf_mdd_detected) {
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} }
} }
...@@ -7318,18 +7315,35 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) ...@@ -7318,18 +7315,35 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
"Too many MDD events on VF %d, disabled\n", i); "Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n"); "Use PF Control I/F to re-enable the VF\n");
set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
} }
} }
/* re-enable mdd interrupt cause */ /* re-enable mdd interrupt cause */
clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg); wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw); i40e_flush(hw);
} }
/**
* i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
* @pf: board private structure
**/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
int i;
/* loop through and set pending bit for all active UDP filters */
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->udp_ports[i].port)
pf->pending_udp_bitmap |= BIT_ULL(i);
}
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
/** /**
* i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
* @pf: board private structure * @pf: board private structure
...@@ -7349,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ...@@ -7349,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) { if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i); pf->pending_udp_bitmap &= ~BIT_ULL(i);
port = pf->udp_ports[i].index; port = pf->udp_ports[i].port;
if (port) if (port)
ret = i40e_aq_add_udp_tunnel(hw, port, ret = i40e_aq_add_udp_tunnel(hw, port,
pf->udp_ports[i].type, pf->udp_ports[i].type,
...@@ -7366,7 +7380,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ...@@ -7366,7 +7380,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
i40e_stat_str(&pf->hw, ret), i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status)); pf->hw.aq.asq_last_status));
pf->udp_ports[i].index = 0; pf->udp_ports[i].port = 0;
} }
} }
} }
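The pending-port bookkeeping used by i40e_sync_udp_filters() and i40e_sync_udp_filters_subtask() above is a simple two-part structure: a stored port per slot plus a per-slot pending bit, so one sweep can mark every known tunnel port for reprogramming (which is how the ports get repopulated after a reset further down). A rough user-space model with hypothetical names, not driver code:

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16	/* stand-in for I40E_MAX_PF_UDP_OFFLOAD_PORTS */

struct port_table {
	uint16_t port[MAX_PORTS];	/* 0 means the slot is free or marked for deletion */
	uint64_t pending;		/* one "needs programming" bit per slot */
};

/* mark every stored port pending, e.g. to repopulate hardware after a reset */
static void sync_all(struct port_table *t)
{
	for (int i = 0; i < MAX_PORTS; i++)
		if (t->port[i])
			t->pending |= UINT64_C(1) << i;
}

/* service-task side: consume pending bits, adding or deleting as needed */
static void sync_subtask(struct port_table *t)
{
	for (int i = 0; i < MAX_PORTS; i++) {
		if (!(t->pending & (UINT64_C(1) << i)))
			continue;
		t->pending &= ~(UINT64_C(1) << i);
		if (t->port[i])
			printf("add tunnel port %u\n", (unsigned)t->port[i]);
		else
			printf("delete tunnel port in slot %d\n", i);
	}
}

int main(void)
{
	struct port_table t = { .port = { 4789, 6081 } };	/* e.g. VXLAN, GENEVE */

	sync_all(&t);		/* what i40e_sync_udp_filters() models */
	sync_subtask(&t);	/* what the service task does later */
	return 0;
}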
...@@ -7384,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work) ...@@ -7384,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies; unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */ /* don't bother with service tasks if a reset is in progress */
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return; return;
}
if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return; return;
i40e_detect_recover_hung(pf); i40e_detect_recover_hung(pf);
...@@ -7416,16 +7429,16 @@ static void i40e_service_task(struct work_struct *work) ...@@ -7416,16 +7429,16 @@ static void i40e_service_task(struct work_struct *work)
/* flush memory to make sure state is correct before next watchdog */ /* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic(); smp_mb__before_atomic();
clear_bit(__I40E_SERVICE_SCHED, &pf->state); clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* If the tasks have taken longer than one timer cycle or there /* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now * is more work to be done, reschedule the service task now
* rather than wait for the timer to tick again. * rather than wait for the timer to tick again.
*/ */
if (time_after(jiffies, (start_time + pf->service_timer_period)) || if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} }
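The reschedule test at the end of the service task combines a time budget with the event-pending bits: if the work overran one timer period, or more events are already queued, the task is rescheduled immediately instead of waiting for the next tick. A user-space approximation of that decision, using plain arithmetic in place of jiffies and time_after():

#include <stdbool.h>
#include <stdio.h>

static bool should_reschedule(unsigned long start, unsigned long now,
			      unsigned long period, bool events_pending)
{
	/* time_after(now, start + period), written with wrap-safe arithmetic */
	return (long)(now - (start + period)) > 0 || events_pending;
}

int main(void)
{
	printf("%d\n", should_reschedule(0, 50, 100, false));	/* 0: finished in time */
	printf("%d\n", should_reschedule(0, 150, 100, false));	/* 1: overran the period */
	printf("%d\n", should_reschedule(0, 50, 100, true));	/* 1: more events pending */
	return 0;
}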
...@@ -7574,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) ...@@ -7574,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
} }
vsi->type = type; vsi->type = type;
vsi->back = pf; vsi->back = pf;
set_bit(__I40E_DOWN, &vsi->state); set_bit(__I40E_VSI_DOWN, vsi->state);
vsi->flags = 0; vsi->flags = 0;
vsi->idx = vsi_idx; vsi->idx = vsi_idx;
vsi->int_rate_limit = 0; vsi->int_rate_limit = 0;
...@@ -8156,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) ...@@ -8156,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* Only request the irq if this is the first time through, and /* Only request the irq if this is the first time through, and
* not when we're rebuilding after a Reset * not when we're rebuilding after a Reset
*/ */
if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
err = request_irq(pf->msix_entries[0].vector, err = request_irq(pf->msix_entries[0].vector,
i40e_intr, 0, pf->int_name, pf); i40e_intr, 0, pf->int_name, pf);
if (err) { if (err) {
...@@ -8808,9 +8821,9 @@ static int i40e_sw_init(struct i40e_pf *pf) ...@@ -8808,9 +8821,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.aq.api_min_ver > 4))) { (pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */ /* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else { } else {
pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} }
pf->eeprom_version = 0xDEAD; pf->eeprom_version = 0xDEAD;
...@@ -8870,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) ...@@ -8870,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
need_reset = true; need_reset = true;
i40e_fdir_filter_exit(pf); i40e_fdir_filter_exit(pf);
} }
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED; I40E_FLAG_FD_SB_AUTO_DISABLED);
/* reset fd counters */ /* reset fd counters */
pf->fd_add_err = 0; pf->fd_add_err = 0;
pf->fd_atr_cnt = 0; pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */ /* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) { pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
if (I40E_DEBUG_FD & pf->hw.debug_mask) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
} }
} }
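The flag rework above replaces the separate hw_disabled_flags word with explicit *_AUTO_DISABLED bits in pf->flags, so "auto-disabled at runtime" is no longer expressed by setting an *_ENABLED bit in a second field. A small sketch of the resulting check, with stand-in flag names rather than the driver's definitions:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_FD_ATR_ENABLED		(1u << 0)	/* hypothetical bit values */
#define FLAG_FD_ATR_AUTO_DISABLED	(1u << 1)

static bool atr_active(unsigned int flags)
{
	/* the feature runs only if it is enabled and has not been auto-disabled */
	return (flags & FLAG_FD_ATR_ENABLED) &&
	       !(flags & FLAG_FD_ATR_AUTO_DISABLED);
}

int main(void)
{
	unsigned int flags = FLAG_FD_ATR_ENABLED;

	printf("ATR active: %d\n", atr_active(flags));	/* 1 */
	flags |= FLAG_FD_ATR_AUTO_DISABLED;		/* e.g. a sideband TCP rule was added */
	printf("ATR active: %d\n", atr_active(flags));	/* 0 */
	flags &= ~FLAG_FD_ATR_AUTO_DISABLED;		/* re-enable path */
	printf("ATR active: %d\n", atr_active(flags));	/* 1 */
	return 0;
}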
...@@ -8953,7 +8966,7 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) ...@@ -8953,7 +8966,7 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
u8 i; u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->udp_ports[i].index == port) if (pf->udp_ports[i].port == port)
return i; return i;
} }
...@@ -9006,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, ...@@ -9006,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
} }
/* New port: add it and mark its index in the bitmap */ /* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].index = port; pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
} }
...@@ -9047,7 +9060,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, ...@@ -9047,7 +9060,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
/* if port exists, set it to 0 (mark for deletion) /* if port exists, set it to 0 (mark for deletion)
* and make it pending * and make it pending
*/ */
pf->udp_ports[idx].index = 0; pf->udp_ports[idx].port = 0;
pf->pending_udp_bitmap |= BIT_ULL(idx); pf->pending_udp_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
...@@ -9701,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -9701,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
} }
vsi->active_filters = 0; vsi->active_filters = 0;
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */ /* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
...@@ -9754,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) ...@@ -9754,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
return -ENODEV; return -ENODEV;
} }
if (vsi == pf->vsi[pf->lan_vsi] && if (vsi == pf->vsi[pf->lan_vsi] &&
!test_bit(__I40E_DOWN, &pf->state)) { !test_bit(__I40E_VSI_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV; return -ENODEV;
} }
...@@ -10738,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) ...@@ -10738,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf); i40e_ptp_init(pf);
/* repopulate tunnel port filters */
i40e_sync_udp_filters(pf);
return ret; return ret;
} }
...@@ -10987,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -10987,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
pf->next_vsi = 0; pf->next_vsi = 0;
pf->pdev = pdev; pf->pdev = pdev;
set_bit(__I40E_DOWN, &pf->state); set_bit(__I40E_VSI_DOWN, pf->state);
hw = &pf->hw; hw = &pf->hw;
hw->back = pf; hw->back = pf;
...@@ -11166,7 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11166,7 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->service_timer_period = HZ; pf->service_timer_period = HZ;
INIT_WORK(&pf->service_task, i40e_service_task); INIT_WORK(&pf->service_task, i40e_service_task);
clear_bit(__I40E_SERVICE_SCHED, &pf->state); clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* NVM bit on means WoL disabled for the port */ /* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
...@@ -11204,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11204,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */ /* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
!test_bit(__I40E_BAD_EEPROM, &pf->state)) { !test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev)) if (pci_num_vf(pdev))
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
} }
...@@ -11277,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11277,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* before setting up the misc vector or we get a race and the vector * before setting up the misc vector or we get a race and the vector
* ends up disabled forever. * ends up disabled forever.
*/ */
clear_bit(__I40E_DOWN, &pf->state); clear_bit(__I40E_VSI_DOWN, pf->state);
/* In case of MSIX we are going to setup the misc vector right here /* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI * to handle admin queue events etc. In case of legacy and MSI
...@@ -11297,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11297,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */ /* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
!test_bit(__I40E_BAD_EEPROM, &pf->state)) { !test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */ /* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
...@@ -11432,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11432,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Unwind what we've done if something failed in the setup */ /* Unwind what we've done if something failed in the setup */
err_vsis: err_vsis:
set_bit(__I40E_DOWN, &pf->state); set_bit(__I40E_VSI_DOWN, pf->state);
i40e_clear_interrupt_scheme(pf); i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi); kfree(pf->vsi);
err_switch_setup: err_switch_setup:
...@@ -11483,8 +11499,8 @@ static void i40e_remove(struct pci_dev *pdev) ...@@ -11483,8 +11499,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
/* no more scheduling of any task */ /* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, &pf->state); set_bit(__I40E_VSI_DOWN, pf->state);
if (pf->service_timer.data) if (pf->service_timer.data)
del_timer_sync(&pf->service_timer); del_timer_sync(&pf->service_timer);
if (pf->service_task.func) if (pf->service_task.func)
...@@ -11592,7 +11608,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, ...@@ -11592,7 +11608,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
} }
/* shutdown all operations */ /* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, &pf->state)) { if (!test_bit(__I40E_SUSPENDED, pf->state)) {
rtnl_lock(); rtnl_lock();
i40e_prep_for_reset(pf, true); i40e_prep_for_reset(pf, true);
rtnl_unlock(); rtnl_unlock();
...@@ -11659,7 +11675,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) ...@@ -11659,7 +11675,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s\n", __func__); dev_dbg(&pdev->dev, "%s\n", __func__);
if (test_bit(__I40E_SUSPENDED, &pf->state)) if (test_bit(__I40E_SUSPENDED, pf->state))
return; return;
rtnl_lock(); rtnl_lock();
...@@ -11723,8 +11739,8 @@ static void i40e_shutdown(struct pci_dev *pdev) ...@@ -11723,8 +11739,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, &pf->state); set_bit(__I40E_VSI_DOWN, pf->state);
rtnl_lock(); rtnl_lock();
i40e_prep_for_reset(pf, true); i40e_prep_for_reset(pf, true);
rtnl_unlock(); rtnl_unlock();
...@@ -11772,8 +11788,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) ...@@ -11772,8 +11788,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
int retval = 0; int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, &pf->state); set_bit(__I40E_VSI_DOWN, pf->state);
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf); i40e_enable_mc_magic_wake(pf);
...@@ -11824,8 +11840,8 @@ static int i40e_resume(struct pci_dev *pdev) ...@@ -11824,8 +11840,8 @@ static int i40e_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false); pci_wake_from_d3(pdev, false);
/* handling the reset will rebuild the device state */ /* handling the reset will rebuild the device state */
if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
clear_bit(__I40E_DOWN, &pf->state); clear_bit(__I40E_VSI_DOWN, pf->state);
rtnl_lock(); rtnl_lock();
i40e_reset_and_rebuild(pf, false, true); i40e_reset_and_rebuild(pf, false, true);
rtnl_unlock(); rtnl_unlock();
......
...@@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) ...@@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(pf->ptp_tx_skb); dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL; pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
} }
/** /**
...@@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) ...@@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
if (pf->ptp_tx_skb) { if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb); dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL; pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
} }
if (pf->ptp_clock) { if (pf->ptp_clock) {
......
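These PTP hunks only drop the address-of operator, but for readers unfamiliar with the pattern: __I40E_PTP_TX_IN_PROGRESS acts as a one-deep lock around the single in-flight Tx timestamp skb, taken with test_and_set_bit_lock() on the transmit path and released with clear_bit_unlock() here. A self-contained C11 model of that acquire/release handshake, not driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag ptp_tx_in_progress = ATOMIC_FLAG_INIT;

/* transmit path: claim the single timestamp slot, or report it busy */
static bool try_claim_timestamp_slot(void)
{
	/* like test_and_set_bit_lock(): true means we now own the slot */
	return !atomic_flag_test_and_set_explicit(&ptp_tx_in_progress,
						  memory_order_acquire);
}

/* completion path: release the slot, like clear_bit_unlock() */
static void release_timestamp_slot(void)
{
	atomic_flag_clear_explicit(&ptp_tx_in_progress, memory_order_release);
}

int main(void)
{
	printf("first claim:   %d\n", try_claim_timestamp_slot());	/* 1 */
	printf("second claim:  %d\n", try_claim_timestamp_slot());	/* 0: already in flight */
	release_timestamp_slot();
	printf("after release: %d\n", try_claim_timestamp_slot());	/* 1 */
	return 0;
}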
...@@ -333,15 +333,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, ...@@ -333,15 +333,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask) I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
} else { } else {
pf->fd_tcp4_filter_cnt--; pf->fd_tcp4_filter_cnt--;
if (pf->fd_tcp4_filter_cnt == 0) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
} }
return 0; return 0;
...@@ -589,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -589,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
* progress do nothing, once flush is complete the state will * progress do nothing, once flush is complete the state will
* be cleared. * be cleared.
*/ */
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return; return;
pf->fd_add_err++; pf->fd_add_err++;
...@@ -597,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -597,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
} }
/* filter programming failed most likely due to table full */ /* filter programming failed most likely due to table full */
...@@ -611,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -611,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/ */
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->hw_disabled_flags & !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
I40E_FLAG_FD_SB_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask) if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->hw_disabled_flags |=
I40E_FLAG_FD_SB_ENABLED;
} }
} }
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
...@@ -850,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -850,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget && if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) && ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) && !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true; tx_ring->arm_wb = true;
} }
...@@ -868,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -868,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb(); smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev, if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) && tx_ring->queue_index) &&
!test_bit(__I40E_DOWN, &vsi->state)) { !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev, netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index); tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue; ++tx_ring->tx_stats.restart_queue;
...@@ -2179,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -2179,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
} }
enable_int: enable_int:
if (!test_bit(__I40E_DOWN, &vsi->state)) if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval); wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown) if (q_vector->itr_countdown)
...@@ -2208,7 +2200,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) ...@@ -2208,7 +2200,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring; int budget_per_ring;
int work_done = 0; int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) { if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi); napi_complete(napi);
return 0; return 0;
} }
...@@ -2312,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2312,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return; return;
if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
return; return;
/* if sampling is disabled do nothing */ /* if sampling is disabled do nothing */
...@@ -2346,7 +2338,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2346,7 +2338,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen); th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */ /* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
return; return;
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
/* HW ATR eviction will take care of removing filters on FIN /* HW ATR eviction will take care of removing filters on FIN
...@@ -2634,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2634,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
return 0; return 0;
if (pf->ptp_tx && if (pf->ptp_tx &&
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
pf->ptp_tx_skb = skb_get(skb); pf->ptp_tx_skb = skb_get(skb);
} else { } else {
......
...@@ -50,8 +50,8 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, ...@@ -50,8 +50,8 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */ /* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
continue; continue;
/* Ignore return value on purpose - a given VF may fail, but /* Ignore return value on purpose - a given VF may fail, but
...@@ -137,8 +137,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) ...@@ -137,8 +137,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
return; return;
/* verify if the VF is in either init or active before proceeding */ /* verify if the VF is in either init or active before proceeding */
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
return; return;
abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
...@@ -812,7 +812,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) ...@@ -812,7 +812,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
/* Start by disabling VF's configuration API to prevent the OS from /* Start by disabling VF's configuration API to prevent the OS from
* accessing the VF's VSI after it's freed / invalidated. * accessing the VF's VSI after it's freed / invalidated.
*/ */
clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
/* free vsi & disconnect it from the parent uplink */ /* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_idx) { if (vf->lan_vsi_idx) {
...@@ -884,7 +884,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) ...@@ -884,7 +884,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
vf->num_queue_pairs = total_queue_pairs; vf->num_queue_pairs = total_queue_pairs;
/* VF is now completely initialized */ /* VF is now completely initialized */
set_bit(I40E_VF_STAT_INIT, &vf->vf_states); set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
error_alloc: error_alloc:
if (ret) if (ret)
...@@ -938,7 +938,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) ...@@ -938,7 +938,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
u32 reg, reg_idx, bit_idx; u32 reg, reg_idx, bit_idx;
/* warn the VF */ /* warn the VF */
clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled /* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
...@@ -946,7 +946,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) ...@@ -946,7 +946,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* to do it earlier to give some time to finish to any VF config * to do it earlier to give some time to finish to any VF config
* functions that may still be running at this point. * functions that may still be running at this point.
*/ */
clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we /* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register. * just need to clean up, so don't hit the VFRTRIG register.
...@@ -1004,10 +1004,11 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) ...@@ -1004,10 +1004,11 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
if (!i40e_alloc_vf_res(vf)) { if (!i40e_alloc_vf_res(vf)) {
int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_enable_vf_mappings(vf); i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
/* Do not notify the client during VF init */ /* Do not notify the client during VF init */
if (vf->pf->num_alloc_vfs) if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
&vf->vf_states))
i40e_notify_client_of_vf_reset(pf, abs_vf_id); i40e_notify_client_of_vf_reset(pf, abs_vf_id);
vf->num_vlan = 0; vf->num_vlan = 0;
} }
...@@ -1035,7 +1036,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -1035,7 +1036,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
int i; int i;
/* If VFs have been disabled, there is no need to reset */ /* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
return; return;
i40e_trigger_vf_reset(vf, flr); i40e_trigger_vf_reset(vf, flr);
...@@ -1072,7 +1073,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -1072,7 +1073,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf); i40e_cleanup_reset_vf(vf);
i40e_flush(hw); i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, &pf->state); clear_bit(__I40E_VF_DISABLE, pf->state);
} }
/** /**
...@@ -1097,7 +1098,7 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) ...@@ -1097,7 +1098,7 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return; return;
/* If VFs have been disabled, there is no need to reset */ /* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
return; return;
/* Begin reset on all VFs at once */ /* Begin reset on all VFs at once */
...@@ -1172,7 +1173,7 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) ...@@ -1172,7 +1173,7 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
i40e_cleanup_reset_vf(&pf->vf[v]); i40e_cleanup_reset_vf(&pf->vf[v]);
i40e_flush(hw); i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, &pf->state); clear_bit(__I40E_VF_DISABLE, pf->state);
} }
/** /**
...@@ -1189,13 +1190,25 @@ void i40e_free_vfs(struct i40e_pf *pf) ...@@ -1189,13 +1190,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf) if (!pf->vf)
return; return;
while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000); usleep_range(1000, 2000);
i40e_notify_client_of_vf_enable(pf, 0); i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) /* Amortize wait time by stopping all VFs at the same time */
i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); for (i = 0; i < pf->num_alloc_vfs; i++) {
if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
continue;
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
}
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
continue;
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
}
/* Disable IOV before freeing resources. This lets any VF drivers /* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank * running in the host get themselves cleaned up before we yank
...@@ -1206,13 +1219,11 @@ void i40e_free_vfs(struct i40e_pf *pf) ...@@ -1206,13 +1219,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
else else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
msleep(20); /* let any messages in transit get finished up */
/* free up VF resources */ /* free up VF resources */
tmp = pf->num_alloc_vfs; tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0; pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) { for (i = 0; i < tmp; i++) {
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]); i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */ /* disable qp mappings */
i40e_disable_vf_mappings(&pf->vf[i]); i40e_disable_vf_mappings(&pf->vf[i]);
...@@ -1235,7 +1246,7 @@ void i40e_free_vfs(struct i40e_pf *pf) ...@@ -1235,7 +1246,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
} }
} }
clear_bit(__I40E_VF_DISABLE, &pf->state); clear_bit(__I40E_VF_DISABLE, pf->state);
} }
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
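The reworked teardown above splits "request the stop" from "wait for it to finish", so the per-VF settle times overlap instead of adding up (and the extra msleep() is dropped because pci_disable_sriov() already delays). A toy model of the two-pass structure; the real helpers are i40e_vsi_stop_rings_no_wait() and i40e_vsi_wait_queues_disabled():

#include <stdio.h>

#define NUM_VFS 4

static void stop_rings_no_wait(int vf)   { printf("VF%d: stop requested\n", vf); }
static void wait_queues_disabled(int vf) { printf("VF%d: queues disabled\n", vf); }

int main(void)
{
	int i;

	/* 1) request the stop for every VF first ... */
	for (i = 0; i < NUM_VFS; i++)
		stop_rings_no_wait(i);

	/* 2) ... then wait once per VF.  Because every stop is already in
	 *    flight, the hardware settle times overlap and the total
	 *    wall-clock cost is roughly one delay instead of NUM_VFS delays.
	 */
	for (i = 0; i < NUM_VFS; i++)
		wait_queues_disabled(i);

	return 0;
}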
...@@ -1280,12 +1291,15 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) ...@@ -1280,12 +1291,15 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */ /* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true; vfs[i].spoofchk = true;
/* VF resources get allocated during reset */
i40e_reset_vf(&vfs[i], false); set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
} }
pf->num_alloc_vfs = num_alloc_vfs; pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
i40e_reset_all_vfs(pf, false);
i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
err_alloc: err_alloc:
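The new I40E_VF_STATE_PRE_ENABLE bit is set while VFs are being allocated and consumed with test_and_clear_bit() in i40e_cleanup_reset_vf(), so (per the retained "Do not notify the client during VF init" comment) the client notification is skipped only for the first reset after allocation, now driven by a single i40e_reset_all_vfs() call. A user-space sketch of that handshake, with stand-in names and assuming the intent stated in that comment:

#include <stdbool.h>
#include <stdio.h>

#define VF_STATE_PRE_ENABLE	(1u << 0)	/* stand-in for I40E_VF_STATE_PRE_ENABLE */

/* the test_and_clear_bit() idea: return the old value, then clear the bit */
static bool test_and_clear(unsigned int *states, unsigned int bit)
{
	bool was_set = *states & bit;

	*states &= ~bit;
	return was_set;
}

static void cleanup_reset_vf(int id, unsigned int *states)
{
	/* skip the client notification only for the first reset after allocation */
	if (!test_and_clear(states, VF_STATE_PRE_ENABLE))
		printf("VF%d: notify client of reset\n", id);
	else
		printf("VF%d: initial bring-up, client not notified\n", id);
}

int main(void)
{
	unsigned int states = VF_STATE_PRE_ENABLE;	/* set in the allocation path */

	cleanup_reset_vf(0, &states);	/* reset-all during allocation: skipped */
	cleanup_reset_vf(0, &states);	/* a later, ordinary VF reset: notified */
	return 0;
}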
...@@ -1312,7 +1326,7 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) ...@@ -1312,7 +1326,7 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev); int pre_existing_vfs = pci_num_vf(pdev);
int err = 0; int err = 0;
if (test_bit(__I40E_TESTING, &pf->state)) { if (test_bit(__I40E_TESTING, pf->state)) {
dev_warn(&pdev->dev, dev_warn(&pdev->dev,
"Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
err = -EPERM; err = -EPERM;
...@@ -1418,7 +1432,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, ...@@ -1418,7 +1432,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
"Number of invalid messages exceeded for VF %d\n", "Number of invalid messages exceeded for VF %d\n",
vf->vf_id); vf->vf_id);
dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
} }
} else { } else {
vf->num_valid_msgs++; vf->num_valid_msgs++;
...@@ -1493,7 +1507,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ...@@ -1493,7 +1507,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
int len = 0; int len = 0;
int ret; int ret;
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto err; goto err;
} }
...@@ -1522,7 +1536,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ...@@ -1522,7 +1536,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (i40e_vf_client_capable(pf, vf->vf_id) && if (i40e_vf_client_capable(pf, vf->vf_id) &&
(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
} }
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
...@@ -1583,7 +1597,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ...@@ -1583,7 +1597,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
ether_addr_copy(vfres->vsi_res[0].default_mac_addr, ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr); vf->default_lan_addr.addr);
} }
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
err: err:
/* send the response back to the VF */ /* send the response back to the VF */
...@@ -1606,7 +1620,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ...@@ -1606,7 +1620,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
**/ **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{ {
if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
i40e_reset_vf(vf, false); i40e_reset_vf(vf, false);
} }
...@@ -1654,7 +1668,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1654,7 +1668,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
int bkt; int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id); vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
!vsi) { !vsi) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
...@@ -1715,9 +1729,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1715,9 +1729,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
"VF %d successfully set multicast promiscuous mode\n", "VF %d successfully set multicast promiscuous mode\n",
vf->vf_id); vf->vf_id);
if (allmulti) if (allmulti)
set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
else else
clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
} }
if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
...@@ -1766,9 +1780,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1766,9 +1780,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
"VF %d successfully set unicast promiscuous mode\n", "VF %d successfully set unicast promiscuous mode\n",
vf->vf_id); vf->vf_id);
if (alluni) if (alluni)
set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
else else
clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
} }
error_param: error_param:
...@@ -1797,7 +1811,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1797,7 +1811,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
int i; int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -1854,7 +1868,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1854,7 +1868,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
unsigned long tempmap; unsigned long tempmap;
int i; int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -1914,7 +1928,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1914,7 +1928,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vqs->vsi_id; u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -1953,7 +1967,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1953,7 +1967,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf; struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -1995,7 +2009,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1995,7 +2009,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
memset(&stats, 0, sizeof(struct i40e_eth_stats)); memset(&stats, 0, sizeof(struct i40e_eth_stats));
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -2082,7 +2096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2082,7 +2096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status ret = 0; i40e_status ret = 0;
int i; int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM; ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
...@@ -2151,7 +2165,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2151,7 +2165,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status ret = 0; i40e_status ret = 0;
int i; int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM; ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
...@@ -2217,7 +2231,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2217,7 +2231,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
goto error_param; goto error_param;
} }
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
...@@ -2244,12 +2258,12 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2244,12 +2258,12 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!ret) if (!ret)
vf->num_vlan++; vf->num_vlan++;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
true, true,
vfl->vlan_id[i], vfl->vlan_id[i],
NULL); NULL);
if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
true, true,
vfl->vlan_id[i], vfl->vlan_id[i],
...@@ -2284,7 +2298,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2284,7 +2298,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
int i; int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
...@@ -2307,12 +2321,12 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2307,12 +2321,12 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
vf->num_vlan--; vf->num_vlan--;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
false, false,
vfl->vlan_id[i], vfl->vlan_id[i],
NULL); NULL);
if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
false, false,
vfl->vlan_id[i], vfl->vlan_id[i],
...@@ -2338,8 +2352,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2338,8 +2352,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -2369,8 +2383,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, ...@@ -2369,8 +2383,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
(struct i40e_virtchnl_iwarp_qvlist_info *)msg; (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -2407,7 +2421,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2407,7 +2421,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vrk->vsi_id; u16 vsi_id = vrk->vsi_id;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
(vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
...@@ -2439,7 +2453,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2439,7 +2453,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vrl->vsi_id; u16 vsi_id = vrl->vsi_id;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
(vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
...@@ -2469,7 +2483,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2469,7 +2483,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
int len = 0; int len = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto err; goto err;
} }
...@@ -2506,7 +2520,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2506,7 +2520,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto err; goto err;
} }
...@@ -2536,7 +2550,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, ...@@ -2536,7 +2550,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int valid_len = 0; int valid_len = 0;
/* Check if VF is disabled. */ /* Check if VF is disabled. */
if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
return I40E_ERR_PARAM; return I40E_ERR_PARAM;
/* Validate message length. */ /* Validate message length. */
...@@ -2804,7 +2818,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) ...@@ -2804,7 +2818,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
struct i40e_vf *vf; struct i40e_vf *vf;
int vf_id; int vf_id;
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
return 0; return 0;
/* Re-enable the VFLR interrupt cause here, before looking for which /* Re-enable the VFLR interrupt cause here, before looking for which
...@@ -2817,7 +2831,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) ...@@ -2817,7 +2831,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
wr32(hw, I40E_PFINT_ICR0_ENA, reg); wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw); i40e_flush(hw);
clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
...@@ -2860,7 +2874,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2860,7 +2874,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
vf = &(pf->vf[vf_id]); vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id); vf_id);
ret = -EAGAIN; ret = -EAGAIN;
...@@ -2949,7 +2963,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ...@@ -2949,7 +2963,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
vf = &(pf->vf[vf_id]); vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id); vf_id);
ret = -EAGAIN; ret = -EAGAIN;
...@@ -3081,7 +3095,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, ...@@ -3081,7 +3095,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf = &(pf->vf[vf_id]); vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id); vf_id);
ret = -EAGAIN; ret = -EAGAIN;
...@@ -3162,7 +3176,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ...@@ -3162,7 +3176,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &(pf->vf[vf_id]); vf = &(pf->vf[vf_id]);
/* first vsi is always the LAN vsi */ /* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id); vf_id);
ret = -EAGAIN; ret = -EAGAIN;
...@@ -3281,7 +3295,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) ...@@ -3281,7 +3295,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
} }
vf = &(pf->vf[vf_id]); vf = &(pf->vf[vf_id]);
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id); vf_id);
ret = -EAGAIN; ret = -EAGAIN;
......
...@@ -56,13 +56,14 @@ enum i40e_queue_ctrl { ...@@ -56,13 +56,14 @@ enum i40e_queue_ctrl {
/* VF states */ /* VF states */
enum i40e_vf_states { enum i40e_vf_states {
I40E_VF_STAT_INIT = 0, I40E_VF_STATE_INIT = 0,
I40E_VF_STAT_ACTIVE, I40E_VF_STATE_ACTIVE,
I40E_VF_STAT_IWARPENA, I40E_VF_STATE_IWARPENA,
I40E_VF_STAT_FCOEENA, I40E_VF_STATE_FCOEENA,
I40E_VF_STAT_DISABLED, I40E_VF_STATE_DISABLED,
I40E_VF_STAT_MC_PROMISC, I40E_VF_STATE_MC_PROMISC,
I40E_VF_STAT_UC_PROMISC, I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
}; };
/* VF capabilities */ /* VF capabilities */
......
...@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget && if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) && ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) && !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true; tx_ring->arm_wb = true;
} }
...@@ -284,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -284,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb(); smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev, if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) && tx_ring->queue_index) &&
!test_bit(__I40E_DOWN, &vsi->state)) { !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev, netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index); tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue; ++tx_ring->tx_stats.restart_queue;
...@@ -1508,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1508,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
} }
enable_int: enable_int:
if (!test_bit(__I40E_DOWN, &vsi->state)) if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval); wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown) if (q_vector->itr_countdown)
...@@ -1537,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) ...@@ -1537,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring; int budget_per_ring;
int work_done = 0; int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) { if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi); napi_complete(napi);
return 0; return 0;
} }
......
...@@ -49,6 +49,13 @@ ...@@ -49,6 +49,13 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: " #define PFX "i40evf: "
/* VSI state flags shared with common code */
enum i40evf_vsi_state_t {
__I40E_VSI_DOWN,
/* This must be last as it determines the size of the BITMAP */
__I40E_VSI_STATE_SIZE__,
};
/* dummy struct to make common code less painful */ /* dummy struct to make common code less painful */
struct i40e_vsi { struct i40e_vsi {
struct i40evf_adapter *back; struct i40evf_adapter *back;
...@@ -56,7 +63,7 @@ struct i40e_vsi { ...@@ -56,7 +63,7 @@ struct i40e_vsi {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid; u16 seid;
u16 id; u16 id;
unsigned long state; DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
int base_vector; int base_vector;
u16 work_limit; u16 work_limit;
u16 qs_handle; u16 qs_handle;
...@@ -168,8 +175,6 @@ enum i40evf_critical_section_t { ...@@ -168,8 +175,6 @@ enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
__I40EVF_IN_CLIENT_TASK, __I40EVF_IN_CLIENT_TASK,
}; };
/* make common code happy */
#define __I40E_DOWN __I40EVF_DOWN
/* board specific private data structure */ /* board specific private data structure */
struct i40evf_adapter { struct i40evf_adapter {
...@@ -218,7 +223,6 @@ struct i40evf_adapter { ...@@ -218,7 +223,6 @@ struct i40evf_adapter {
#define I40EVF_FLAG_ALLMULTI_ON BIT(19) #define I40EVF_FLAG_ALLMULTI_ON BIT(19)
#define I40EVF_FLAG_LEGACY_RX BIT(20) #define I40EVF_FLAG_LEGACY_RX BIT(20)
/* duplicates for common code */ /* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
......
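The behavioural content of the i40evf.h hunks above is small: the VSI state member changes from a single unsigned long into a DECLARE_BITMAP() array sized by the new enum, which is why the test_bit()/set_bit()/clear_bit() call sites in this series drop the leading &. A minimal sketch of the two shapes (simplified stand-in structs; NR_STATES is a placeholder for __I40E_VSI_STATE_SIZE__, not a driver symbol):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define NR_STATES 8	/* placeholder size, for illustration */

struct old_style { unsigned long state; };              /* one word of flags */
struct new_style { DECLARE_BITMAP(state, NR_STATES); }; /* unsigned long state[BITS_TO_LONGS(NR_STATES)] */

static void mark_down(struct old_style *o, struct new_style *n)
{
	set_bit(0, &o->state);	/* scalar: take its address */
	set_bit(0, n->state);	/* array: already decays to a pointer */
}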
...@@ -46,7 +46,7 @@ static const char i40evf_driver_string[] = ...@@ -46,7 +46,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 2 #define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1 #define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 7 #define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \ __stringify(DRV_VERSION_BUILD) \
...@@ -497,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev) ...@@ -497,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev)
int i; int i;
/* if interface is down do nothing */ /* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &adapter->vsi.state)) if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
return; return;
for (i = 0; i < q_vectors; i++) for (i = 0; i < q_vectors; i++)
...@@ -694,13 +694,14 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) ...@@ -694,13 +694,14 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{ {
unsigned int rx_buf_len = I40E_RXBUFFER_2048; unsigned int rx_buf_len = I40E_RXBUFFER_2048;
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
int i; int i;
/* Legacy Rx will always default to a 2048 buffer size. */ /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
struct net_device *netdev = adapter->netdev;
/* For jumbo frames on systems with 4K pages we have to use /* For jumbo frames on systems with 4K pages we have to use
* an order 1 page, so we might as well increase the size * an order 1 page, so we might as well increase the size
* of our Rx buffer to make better use of the available space * of our Rx buffer to make better use of the available space
...@@ -1087,7 +1088,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) ...@@ -1087,7 +1088,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
static void i40evf_up_complete(struct i40evf_adapter *adapter) static void i40evf_up_complete(struct i40evf_adapter *adapter)
{ {
adapter->state = __I40EVF_RUNNING; adapter->state = __I40EVF_RUNNING;
clear_bit(__I40E_DOWN, &adapter->vsi.state); clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_napi_enable_all(adapter); i40evf_napi_enable_all(adapter);
...@@ -1271,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) ...@@ -1271,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
} }
pairs = adapter->num_active_queues; pairs = adapter->num_active_queues;
/* It's easy to be greedy for MSI-X vectors, but it really /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
* doesn't do us much good if we have a lot more vectors * us much good if we have more vectors than CPUs. However, we already
* than CPU's. So let's be conservative and only ask for * limit the total number of queues by the number of CPUs so we do not
* (roughly) twice the number of vectors as there are CPU's. * need any further limiting here.
*/ */
v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; v_budget = min_t(int, pairs + NONQ_VECS,
v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); (int)adapter->vf_res->max_vectors);
adapter->msix_entries = kcalloc(v_budget, adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL); sizeof(struct msix_entry), GFP_KERNEL);
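To make the new vector budget concrete with hypothetical numbers (assuming NONQ_VECS is 1, the single non-queue vector): with pairs = 4 active queue pairs on a 4-CPU system and vf_res->max_vectors = 5, the old code computed min(4, 2 * 4) + 1 = 5 and then clamped it to 5, while the new code asks for min(4 + 1, 5) = 5 directly. Because the queue count is already capped at the number of CPUs, the extra "twice the CPUs" clamp never changed the result.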
...@@ -1508,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) ...@@ -1508,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{ {
int err; int err;
err = i40evf_alloc_queues(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
rtnl_lock(); rtnl_lock();
err = i40evf_set_interrupt_capability(adapter); err = i40evf_set_interrupt_capability(adapter);
rtnl_unlock(); rtnl_unlock();
...@@ -1524,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) ...@@ -1524,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors; goto err_alloc_q_vectors;
} }
err = i40evf_alloc_queues(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled", (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues); adapter->num_active_queues);
return 0; return 0;
err_alloc_queues:
i40evf_free_q_vectors(adapter);
err_alloc_q_vectors: err_alloc_q_vectors:
i40evf_reset_interrupt_capability(adapter); i40evf_reset_interrupt_capability(adapter);
err_set_interrupt: err_set_interrupt:
i40evf_free_queues(adapter);
err_alloc_queues:
return err; return err;
} }
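The reordering above also keeps the unwind path in the usual reverse-of-allocation shape: each label undoes only the steps that had already succeeded before the failing one. A generic sketch of that pattern (hypothetical stand-ins for alloc_queues / set_interrupt_capability / alloc_q_vectors, not the driver code):

static int alloc_a(void) { return 0; }	/* e.g. queues */
static int alloc_b(void) { return 0; }	/* e.g. MSI-X capability */
static int alloc_c(void) { return 0; }	/* e.g. q_vectors */
static void undo_b(void) { }
static void undo_a(void) { }

static int setup_in_order(void)
{
	int err;

	err = alloc_a();
	if (err)
		goto err_a;
	err = alloc_b();
	if (err)
		goto err_b;
	err = alloc_c();
	if (err)
		goto err_c;
	return 0;

err_c:			/* c failed: undo b, then a */
	undo_b();
err_b:			/* b failed: undo a */
	undo_a();
err_a:
	return err;
}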
...@@ -1753,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) ...@@ -1753,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) { if (netif_running(adapter->netdev)) {
set_bit(__I40E_DOWN, &adapter->vsi.state); set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev); netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev); netif_tx_disable(adapter->netdev);
adapter->link_up = false; adapter->link_up = false;
...@@ -2233,7 +2234,7 @@ static int i40evf_close(struct net_device *netdev) ...@@ -2233,7 +2234,7 @@ static int i40evf_close(struct net_device *netdev)
return 0; return 0;
set_bit(__I40E_DOWN, &adapter->vsi.state); set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter)) if (CLIENT_ENABLED(adapter))
adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
...@@ -2674,7 +2675,7 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2674,7 +2675,7 @@ static void i40evf_init_task(struct work_struct *work)
dev_info(&pdev->dev, "GRO is enabled\n"); dev_info(&pdev->dev, "GRO is enabled\n");
adapter->state = __I40EVF_DOWN; adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state); set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_misc_irq_enable(adapter); i40evf_misc_irq_enable(adapter);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
......