Commit 8dd5b698 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-04-30

This series contains updates to i40e and i40evf only.

Jake provides the majority of the changes in this series, starting
with the renaming of a flag to avoid confusion.  He then renames a
variable to a more meaningful name to clarify what is actually being
done and to reduce confusion.  He amortizes the wait time when
initializing or disabling lots of VFs by using i40e_reset_all_vfs()
and i40e_vsi_stop_rings_no_wait(), and cleans up an unnecessary delay
when removing VFs, since pci_disable_sriov() already has its own
delay.  He also avoids using the same flag names for both vsi->state
and pf->state, which makes code review easier and helps future work
use the correct state field when checking bits, and uses
DECLARE_BITMAP() to ensure that we always allocate enough space for
the state flags.  Finally, he replaces hw_disabled_flags with new
*_AUTO_DISABLED flags, which are more readable because we are no
longer setting an *_ENABLED flag to disable a feature.
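
The state-flag rework in the diff below follows one pattern: each state
enum gains a *_SIZE__ terminator, the plain unsigned long field becomes
a DECLARE_BITMAP() sized by that terminator, and callers pass the bitmap
directly instead of taking the address of the field.  Here is a minimal
userspace sketch of that pattern; the tiny set_bit()/test_bit() helpers
only approximate the kernel's atomic bitops, and the enum and struct
names are illustrative, not the driver's.

/* Userspace sketch of the DECLARE_BITMAP() state pattern used in this
 * series.  The helpers below stand in for the kernel's <linux/bitmap.h>
 * and bitops (non-atomic, illustration only); the *_STATE_SIZE
 * terminators mirror __I40E_STATE_SIZE__ / __I40E_VSI_STATE_SIZE__.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG      (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n)   (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)  unsigned long name[BITS_TO_LONGS(bits)]

static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return !!(addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
}

/* PF-level state; the terminator must stay last, it sizes the bitmap. */
enum pf_state { PF_DOWN, PF_CONFIG_BUSY, PF_STATE_SIZE };

/* VSI-level state gets its own enum, so its bits can no longer be
 * mistaken for PF bits during review.
 */
enum vsi_state { VSI_DOWN, VSI_OVERFLOW_PROMISC, VSI_STATE_SIZE };

struct pf  { DECLARE_BITMAP(state, PF_STATE_SIZE); };
struct vsi { DECLARE_BITMAP(state, VSI_STATE_SIZE); };

int main(void)
{
	struct pf pf = { { 0 } };
	struct vsi vsi = { { 0 } };

	/* Callers now pass the array itself, no more &pf->state. */
	set_bit(PF_CONFIG_BUSY, pf.state);
	set_bit(VSI_DOWN, vsi.state);

	printf("pf busy=%d, vsi down=%d\n",
	       test_bit(PF_CONFIG_BUSY, pf.state),
	       test_bit(VSI_DOWN, vsi.state));
	return 0;
}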

Alex corrects an oversight where we were not reprogramming the tunnel
ports after a reset, which was causing us to lose all of the receive
tunnel offloads.
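
The hunks implementing this fix are not shown in the excerpt below, but
the shape of the change is simple: the driver caches the UDP tunnel
ports it has offloaded, a reset wipes the hardware's copy, so the
rebuild path must mark every cached entry pending again and let the
usual sync routine reprogram them.  The sketch below only illustrates
that shape; the structure and function names (port_table,
hw_program_port, and so on) are assumptions, not the i40e driver's
actual API.

/* Hedged sketch of replaying cached tunnel-port offloads after a reset.
 * All names here are illustrative stand-ins, not i40e internals.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TUNNEL_PORTS 16

struct tunnel_port {
	uint16_t port;     /* UDP port in host byte order, 0 = unused slot */
	bool     pending;  /* needs (re)programming into the hardware      */
};

static struct tunnel_port port_table[MAX_TUNNEL_PORTS];

/* Stand-in for the AdminQ call that programs one port filter. */
static void hw_program_port(uint16_t port)
{
	printf("programming UDP tunnel port %u\n", port);
}

/* Normal sync path: push only the entries marked pending. */
static void sync_tunnel_ports(void)
{
	for (int i = 0; i < MAX_TUNNEL_PORTS; i++) {
		if (port_table[i].port && port_table[i].pending) {
			hw_program_port(port_table[i].port);
			port_table[i].pending = false;
		}
	}
}

/* Rebuild-after-reset path: the hardware lost its copy, so mark every
 * cached entry pending again and reuse the same sync routine.  Skipping
 * this step is the oversight the patch corrects.
 */
static void replay_tunnel_ports_after_reset(void)
{
	for (int i = 0; i < MAX_TUNNEL_PORTS; i++)
		if (port_table[i].port)
			port_table[i].pending = true;
	sync_tunnel_ports();
}

int main(void)
{
	port_table[0] = (struct tunnel_port){ .port = 4789, .pending = true }; /* VXLAN  */
	port_table[1] = (struct tunnel_port){ .port = 6081, .pending = true }; /* GENEVE */

	sync_tunnel_ports();                /* initial programming             */
	replay_tunnel_ports_after_reset();  /* what the fix adds, conceptually */
	return 0;
}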

Arnd Bergmann moves the declaration of a local variable to avoid an
unused-variable warning seen on architectures with larger pages.
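
The i40evf_configure_rx() hunk later in this series shows the actual
change; the pattern is simply to declare the variable inside the only
preprocessor branch that reads it, so configurations where that branch
compiles out (PAGE_SIZE >= 8192) never see an unused variable.  A
minimal sketch of the same pattern follows; the MTU threshold and
buffer sizes here are stand-ins, not the driver's exact values.

/* Sketch of the unused-variable fix: declare `netdev` inside the only
 * #if branch that uses it, so larger-page builds where the branch is
 * compiled out do not warn.  PAGE_SIZE default, the MTU check and the
 * buffer sizes below are placeholders, not the real driver logic.
 */
#include <stdio.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

struct net_device { unsigned int mtu; };
struct adapter    { struct net_device *netdev; unsigned int legacy_rx; };

static unsigned int pick_rx_buf_len(struct adapter *adapter)
{
	unsigned int rx_buf_len = 2048;

#if (PAGE_SIZE < 8192)
	if (!adapter->legacy_rx) {
		/* Declared here, in the only branch that uses it, instead
		 * of at function scope where larger-page builds would see
		 * an unused variable.
		 */
		struct net_device *netdev = adapter->netdev;

		if (netdev->mtu > 1500)
			rx_buf_len = 3072;
	}
#endif
	return rx_buf_len;
}

int main(void)
{
	struct net_device dev = { .mtu = 9000 };
	struct adapter adapter = { .netdev = &dev, .legacy_rx = 0 };

	printf("rx_buf_len = %u\n", pick_rx_buf_len(&adapter));
	return 0;
}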
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5b36d8f5 3dfc3eb5
......@@ -125,7 +125,6 @@ enum i40e_state_t {
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
__I40E_DOWN,
__I40E_NEEDS_RESTART,
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
......@@ -138,7 +137,6 @@ enum i40e_state_t {
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_PTP_TX_IN_PROGRESS,
__I40E_BAD_EEPROM,
......@@ -147,6 +145,20 @@ enum i40e_state_t {
__I40E_RESET_FAILED,
__I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
/* VSI state flags */
enum i40e_vsi_state_t {
__I40E_VSI_DOWN,
__I40E_VSI_NEEDS_RESTART,
__I40E_VSI_SYNCING_FILTERS,
__I40E_VSI_OVERFLOW_PROMISC,
__I40E_VSI_REINIT_REQUESTED,
__I40E_VSI_DOWN_REQUESTED,
/* This must be last as it determines the size of the BITMAP */
__I40E_VSI_STATE_SIZE__,
};
enum i40e_interrupt_policy {
......@@ -245,7 +257,7 @@ struct i40e_tc_configuration {
struct i40e_udp_port_config {
/* AdminQ command interface expects port number in Host byte order */
u16 index;
u16 port;
u8 type;
};
......@@ -322,7 +334,7 @@ struct i40e_flex_pit {
struct i40e_pf {
struct pci_dev *pdev;
struct i40e_hw hw;
unsigned long state;
DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
bool fc_autoneg_status;
......@@ -396,6 +408,8 @@ struct i40e_pf {
#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23)
#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24)
#define I40E_FLAG_PTP BIT_ULL(25)
#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27)
......@@ -428,13 +442,6 @@ struct i40e_pf {
#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57)
#define I40E_FLAG_LEGACY_RX BIT_ULL(58)
/* Tracks features that are disabled due to hw limitations.
* If a bit is set here, it means that the corresponding
* bit in the 'flags' field is cleared i.e that feature
* is disabled
*/
u64 hw_disabled_flags;
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
......@@ -593,7 +600,7 @@ struct i40e_vsi {
bool stat_offsets_loaded;
u32 current_netdev_flags;
unsigned long state;
DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
......
......@@ -371,8 +371,8 @@ void i40e_client_subtask(struct i40e_pf *pf)
cdev = pf->cinst;
/* If we're down or resetting, just bail */
if (test_bit(__I40E_DOWN, &pf->state) ||
test_bit(__I40E_CONFIG_BUSY, &pf->state))
if (test_bit(__I40E_DOWN, pf->state) ||
test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
if (!client || !cdev)
......@@ -382,7 +382,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
* the netdev is up, then open the client.
*/
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (!test_bit(__I40E_DOWN, &vsi->state) &&
if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
client->ops && client->ops->open) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
ret = client->ops->open(&cdev->lan_info, client);
......@@ -397,7 +397,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Likewise for client close. If the client is up, but the netdev
* is down, then close the client.
*/
if (test_bit(__I40E_DOWN, &vsi->state) &&
if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
client->ops && client->ops->close) {
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
client->ops->close(&cdev->lan_info, client, false);
......@@ -503,7 +503,7 @@ static void i40e_client_release(struct i40e_client *client)
continue;
while (test_and_set_bit(__I40E_SERVICE_SCHED,
&pf->state))
pf->state))
usleep_range(500, 1000);
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
......@@ -521,7 +521,7 @@ static void i40e_client_release(struct i40e_client *client)
i40e_client_del_instance(pf);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
clear_bit(__I40E_SERVICE_SCHED, pf->state);
}
mutex_unlock(&i40e_device_mutex);
}
......@@ -661,10 +661,10 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
switch (reset_level) {
case I40E_CLIENT_RESET_LEVEL_PF:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
case I40E_CLIENT_RESET_LEVEL_CORE:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
default:
dev_warn(&pf->pdev->dev,
......
......@@ -158,9 +158,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
" state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->state, vsi->flags,
vsi->netdev_registered, vsi->current_netdev_flags);
" flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
dev_info(&pf->pdev->dev,
" state[%d] = %08lx\n",
i, vsi->state[i]);
if (vsi == pf->vsi[pf->lan_vsi])
dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
......@@ -174,7 +177,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
}
dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
(test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
(test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
"ON" : "OFF"));
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
......@@ -1706,7 +1709,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
} else if (!vsi->netdev) {
dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
vsi_seid);
} else if (test_bit(__I40E_DOWN, &vsi->state)) {
} else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
vsi_seid);
} else if (rtnl_trylock()) {
......
......@@ -757,7 +757,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
return -EOPNOTSUPP;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) {
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
timeout--;
if (!timeout)
return -EBUSY;
......@@ -891,7 +891,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
}
done:
clear_bit(__I40E_CONFIG_BUSY, &pf->state);
clear_bit(__I40E_CONFIG_BUSY, pf->state);
return err;
}
......@@ -987,7 +987,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
}
/* If we have link and don't have autoneg */
if (!test_bit(__I40E_DOWN, &pf->state) &&
if (!test_bit(__I40E_DOWN, pf->state) &&
!(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
/* Send message that it might not necessarily work*/
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
......@@ -1039,10 +1039,10 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
if (!test_bit(__I40E_DOWN, &pf->state)) {
if (!test_bit(__I40E_DOWN, pf->state)) {
/* Give it a little more time to try to come back */
msleep(75);
if (!test_bit(__I40E_DOWN, &pf->state))
if (!test_bit(__I40E_DOWN, pf->state))
return i40e_nway_reset(netdev);
}
......@@ -1139,8 +1139,8 @@ static int i40e_get_eeprom(struct net_device *netdev,
/* make sure it is the right magic for NVMUpdate */
if ((eeprom->magic >> 16) != hw->device_id)
errno = -EINVAL;
else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
errno = -EBUSY;
else
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
......@@ -1246,8 +1246,8 @@ static int i40e_set_eeprom(struct net_device *netdev,
/* check for NVMUpdate access method */
else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
errno = -EINVAL;
else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
errno = -EBUSY;
else
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
......@@ -1332,7 +1332,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) {
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
timeout--;
if (!timeout)
return -EBUSY;
......@@ -1485,7 +1485,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
done:
clear_bit(__I40E_CONFIG_BUSY, &pf->state);
clear_bit(__I40E_CONFIG_BUSY, pf->state);
return err;
}
......@@ -1826,7 +1826,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states))
return true;
return false;
}
......@@ -1847,7 +1847,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* Offline tests */
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
set_bit(__I40E_TESTING, pf->state);
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
......@@ -1857,7 +1857,7 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
......@@ -1886,7 +1886,7 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
clear_bit(__I40E_TESTING, pf->state);
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
if (if_running)
......@@ -2924,11 +2924,11 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
int ret = 0;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return -EBUSY;
ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
......@@ -3643,14 +3643,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
return -ENOSPC;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return -EBUSY;
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
......@@ -4086,12 +4086,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
/* Flush current ATR settings if ATR was disabled */
if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
/* Only allow ATR evict on hardware that is capable of handling it */
if (pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
......
......@@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
}
/**
......@@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
}
if (pf->ptp_clock) {
......
......@@ -333,15 +333,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
} else {
pf->fd_tcp4_filter_cnt--;
if (pf->fd_tcp4_filter_cnt == 0) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
return 0;
......@@ -589,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
* progress do nothing, once flush is complete the state will
* be cleared.
*/
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return;
pf->fd_add_err++;
......@@ -597,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
/* filter programming failed most likely due to table full */
......@@ -611,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->hw_disabled_flags &
I40E_FLAG_FD_SB_ENABLED)) {
!(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->hw_disabled_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
}
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
......@@ -850,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
!test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
......@@ -868,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
!test_bit(__I40E_DOWN, &vsi->state)) {
!test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
......@@ -2179,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
if (!test_bit(__I40E_DOWN, &vsi->state))
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
......@@ -2208,7 +2200,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) {
if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
......@@ -2312,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
return;
/* if sampling is disabled do nothing */
......@@ -2346,7 +2338,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
return;
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
/* HW ATR eviction will take care of removing filters on FIN
......@@ -2634,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
return 0;
if (pf->ptp_tx &&
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
pf->ptp_tx_skb = skb_get(skb);
} else {
......
......@@ -56,13 +56,14 @@ enum i40e_queue_ctrl {
/* VF states */
enum i40e_vf_states {
I40E_VF_STAT_INIT = 0,
I40E_VF_STAT_ACTIVE,
I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
I40E_VF_STAT_MC_PROMISC,
I40E_VF_STAT_UC_PROMISC,
I40E_VF_STATE_INIT = 0,
I40E_VF_STATE_ACTIVE,
I40E_VF_STATE_IWARPENA,
I40E_VF_STATE_FCOEENA,
I40E_VF_STATE_DISABLED,
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
};
/* VF capabilities */
......
......@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
!test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
......@@ -284,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
!test_bit(__I40E_DOWN, &vsi->state)) {
!test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
......@@ -1508,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
if (!test_bit(__I40E_DOWN, &vsi->state))
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
......@@ -1537,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) {
if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
......
......@@ -49,6 +49,13 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
/* VSI state flags shared with common code */
enum i40evf_vsi_state_t {
__I40E_VSI_DOWN,
/* This must be last as it determines the size of the BITMAP */
__I40E_VSI_STATE_SIZE__,
};
/* dummy struct to make common code less painful */
struct i40e_vsi {
struct i40evf_adapter *back;
......@@ -56,7 +63,7 @@ struct i40e_vsi {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
unsigned long state;
DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
int base_vector;
u16 work_limit;
u16 qs_handle;
......@@ -168,8 +175,6 @@ enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
__I40EVF_IN_CLIENT_TASK,
};
/* make common code happy */
#define __I40E_DOWN __I40EVF_DOWN
/* board specific private data structure */
struct i40evf_adapter {
......@@ -218,7 +223,6 @@ struct i40evf_adapter {
#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
#define I40EVF_FLAG_LEGACY_RX BIT(20)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
......
......@@ -46,7 +46,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 7
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
......@@ -497,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &adapter->vsi.state))
if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
return;
for (i = 0; i < q_vectors; i++)
......@@ -694,13 +694,14 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
unsigned int rx_buf_len = I40E_RXBUFFER_2048;
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
int i;
/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
struct net_device *netdev = adapter->netdev;
/* For jumbo frames on systems with 4K pages we have to use
* an order 1 page, so we might as well increase the size
* of our Rx buffer to make better use of the available space
......@@ -1087,7 +1088,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
clear_bit(__I40E_DOWN, &adapter->vsi.state);
clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_napi_enable_all(adapter);
......@@ -1271,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
}
pairs = adapter->num_active_queues;
/* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) twice the number of vectors as there are CPU's.
/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
* us much good if we have more vectors than CPUs. However, we already
* limit the total number of queues by the number of CPUs so we do not
* need any further limiting here.
*/
v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
v_budget = min_t(int, pairs + NONQ_VECS,
(int)adapter->vf_res->max_vectors);
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
......@@ -1508,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
int err;
err = i40evf_alloc_queues(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
rtnl_unlock();
......@@ -1524,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors;
}
err = i40evf_alloc_queues(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues);
return 0;
err_alloc_queues:
i40evf_free_q_vectors(adapter);
err_alloc_q_vectors:
i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
i40evf_free_queues(adapter);
err_alloc_queues:
return err;
}
......@@ -1753,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
set_bit(__I40E_DOWN, &adapter->vsi.state);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
adapter->link_up = false;
......@@ -2233,7 +2234,7 @@ static int i40evf_close(struct net_device *netdev)
return 0;
set_bit(__I40E_DOWN, &adapter->vsi.state);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
......@@ -2674,7 +2675,7 @@ static void i40evf_init_task(struct work_struct *work)
dev_info(&pdev->dev, "GRO is enabled\n");
adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
......