Commit f5333f80 authored by David S. Miller

Merge branch '40GbE' of ra.kernel.org:/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-10-06

This series contains updates to i40e and i40evf only.

Rami fixes a typo in the code comments.

Mitch adds an ethtool private flag to control source pruning, to resolve
an issue where our default behavior of enabling source pruning breaks ARP
monitoring in channel bonding.  He also fixes a couple of register
definitions which were incorrect.
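The flag is negative logic: source pruning stays enabled by default, and
setting "disable-source-pruning" makes the driver request local loopback
in the VSI switch configuration.  A condensed sketch of the mechanism,
paraphrased from the hunks below:

    /* new ethtool private flag (set == source pruning disabled) */
    I40E_PRIV_FLAG("disable-source-pruning",
                   I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),

    /* in i40e_add_vsi(): if the flag is set, ask the firmware to allow
     * local loopback on the VSI, which turns source pruning off
     */
    if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
            ctxt.info.valid_sections |=
                    cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
            ctxt.info.switch_id =
                    cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
            ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
    }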

Jake fixes an issue with multiple logical CPUs per core (simultaneous
multithreading - SMT) and how we set an affinity hint based on the v_idx of
that q_vector, which is an incremental value and might lead to an offline
CPU being assigned to a q_vector.  Instead, we should only assign hints
for CPUs which are online, so use cpumask_local_spread().  He also fixed a
VF VLAN tag stripping issue, where the flag created to change this feature
was seen as unchangeable.  Lastly, he organized and re-numbered the
feature flags.
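A condensed sketch of the affinity change, paraphrased from the i40e and
i40evf hunks below; v_idx is mapped through cpumask_local_spread() so the
hint always lands on an online (and preferably NUMA-local) CPU:

    /* before: v_idx used directly as a CPU number, which may be offline */
    irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));

    /* after: spread hints across online CPUs instead */
    cpu = cpumask_local_spread(q_vector->v_idx, -1);
    irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));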

Alan re-enables PTP L4 for XL710 devices with firmware version 6.0 or
greater, now that the bug in the older firmware has been fixed.  He also
implements the PCI error handlers reset_prepare() and reset_done() so that
we can handle function level resets.
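The new handlers simply bracket the existing reset helpers; condensed from
the hunks below:

    static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
    {
            struct i40e_pf *pf = pci_get_drvdata(pdev);

            i40e_prep_for_reset(pf, false);
    }

    static void i40e_pci_error_reset_done(struct pci_dev *pdev)
    {
            struct i40e_pf *pf = pci_get_drvdata(pdev);

            i40e_reset_and_rebuild(pf, false, false);
    }

    static const struct pci_error_handlers i40e_err_handler = {
            .error_detected = i40e_pci_error_detected,
            .slot_reset     = i40e_pci_error_slot_reset,
            .reset_prepare  = i40e_pci_error_reset_prepare,
            .reset_done     = i40e_pci_error_reset_done,
            .resume         = i40e_pci_error_resume,
    };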

Alice cleans up code that was added to the incorrect function during a
merge.

Filip adds a change to display an error message when a module is inserted
that does not meet the thermal requirements; Talking Heads' "Burning Down
the House" comes to mind.  He also fixed a flow director filter issue where
the variable that stores the number of the filter to be removed from the
list was not being cleared when the firmware refused to add the requested
filter.
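Both fixes are small; condensed from the hunks below, the thermal check
keys off a new PHY type reported by the firmware, and the flow director
fix clears the stale filter id once the rejected filter has been dropped:

    /* i40e_handle_link_event(): module exceeds thermal limits */
    if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP)
            dev_err(&pf->pdev->dev,
                    "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");

    /* i40e_fdir_check_and_reenable(): forget the filter marked for removal */
    pf->fd_inv = 0;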
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4bc4e64c b74f571f
@@ -350,7 +350,7 @@ struct i40e_pf {
 	u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
 	u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
 	u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
-	u16 num_req_vfs; /* num VFs requested for this VF */
+	u16 num_req_vfs; /* num VFs requested for this PF */
 	u16 num_vf_qps; /* num queue pairs per VF */
 	u16 num_lan_qps; /* num lan queues this PF has set up */
 	u16 num_lan_msix; /* num queue vectors for the base PF vsi */
@@ -403,55 +403,57 @@ struct i40e_pf {
 	struct timer_list service_timer;
 	struct work_struct service_task;
-	u64 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE			BIT_ULL(0)
-#define I40E_HW_128_QP_RSS_CAPABLE		BIT_ULL(1)
-#define I40E_HW_ATR_EVICT_CAPABLE		BIT_ULL(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE		BIT_ULL(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT_ULL(4)
-#define I40E_HW_NO_PCI_LINK_CHECK		BIT_ULL(5)
-#define I40E_HW_100M_SGMII_CAPABLE		BIT_ULL(6)
-#define I40E_HW_NO_DCB_SUPPORT			BIT_ULL(7)
-#define I40E_HW_USE_SET_LLDP_MIB		BIT_ULL(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE		BIT_ULL(9)
-#define I40E_HW_PTP_L4_CAPABLE			BIT_ULL(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE		BIT_ULL(11)
-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE	BIT_ULL(12)
-#define I40E_HW_HAVE_CRT_RETIMER		BIT_ULL(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE		BIT_ULL(14)
-#define I40E_HW_PHY_CONTROLS_LEDS		BIT_ULL(15)
-#define I40E_HW_STOP_FW_LLDP			BIT_ULL(16)
-#define I40E_HW_PORT_ID_VALID			BIT_ULL(17)
-#define I40E_HW_RESTART_AUTONEG			BIT_ULL(18)
+	u32 hw_features;
+#define I40E_HW_RSS_AQ_CAPABLE			BIT(0)
+#define I40E_HW_128_QP_RSS_CAPABLE		BIT(1)
+#define I40E_HW_ATR_EVICT_CAPABLE		BIT(2)
+#define I40E_HW_WB_ON_ITR_CAPABLE		BIT(3)
+#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT(4)
+#define I40E_HW_NO_PCI_LINK_CHECK		BIT(5)
+#define I40E_HW_100M_SGMII_CAPABLE		BIT(6)
+#define I40E_HW_NO_DCB_SUPPORT			BIT(7)
+#define I40E_HW_USE_SET_LLDP_MIB		BIT(8)
+#define I40E_HW_GENEVE_OFFLOAD_CAPABLE		BIT(9)
+#define I40E_HW_PTP_L4_CAPABLE			BIT(10)
+#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE		BIT(11)
+#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE	BIT(12)
+#define I40E_HW_HAVE_CRT_RETIMER		BIT(13)
+#define I40E_HW_OUTER_UDP_CSUM_CAPABLE		BIT(14)
+#define I40E_HW_PHY_CONTROLS_LEDS		BIT(15)
+#define I40E_HW_STOP_FW_LLDP			BIT(16)
+#define I40E_HW_PORT_ID_VALID			BIT(17)
+#define I40E_HW_RESTART_AUTONEG			BIT(18)
 	u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
-#define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
-#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT_ULL(4)
-#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
-#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
-#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
-#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(16)
-#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
-#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
-#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
-#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT_ULL(23)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT_ULL(24)
-#define I40E_FLAG_PTP				BIT_ULL(25)
-#define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
-#define I40E_FLAG_UDP_FILTER_SYNC		BIT_ULL(27)
-#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(29)
-#define I40E_FLAG_VEB_STATS_ENABLED		BIT_ULL(37)
-#define I40E_FLAG_LINK_POLLING_ENABLED		BIT_ULL(39)
-#define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT_ULL(51)
-#define I40E_FLAG_CLIENT_RESET			BIT_ULL(54)
-#define I40E_FLAG_TEMP_LINK_POLLING		BIT_ULL(55)
-#define I40E_FLAG_CLIENT_L2_CHANGE		BIT_ULL(56)
-#define I40E_FLAG_LEGACY_RX			BIT_ULL(58)
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT(0)
+#define I40E_FLAG_MSI_ENABLED			BIT(1)
+#define I40E_FLAG_MSIX_ENABLED			BIT(2)
+#define I40E_FLAG_RSS_ENABLED			BIT(3)
+#define I40E_FLAG_VMDQ_ENABLED			BIT(4)
+#define I40E_FLAG_FILTER_SYNC			BIT(5)
+#define I40E_FLAG_SRIOV_ENABLED			BIT(6)
+#define I40E_FLAG_DCB_CAPABLE			BIT(7)
+#define I40E_FLAG_DCB_ENABLED			BIT(8)
+#define I40E_FLAG_FD_SB_ENABLED			BIT(9)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT(12)
+#define I40E_FLAG_MFP_ENABLED			BIT(13)
+#define I40E_FLAG_UDP_FILTER_SYNC		BIT(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT(15)
+#define I40E_FLAG_VEB_MODE_ENABLED		BIT(16)
+#define I40E_FLAG_VEB_STATS_ENABLED		BIT(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED		BIT(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT(19)
+#define I40E_FLAG_TEMP_LINK_POLLING		BIT(20)
+#define I40E_FLAG_LEGACY_RX			BIT(21)
+#define I40E_FLAG_PTP				BIT(22)
+#define I40E_FLAG_IWARP_ENABLED			BIT(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE		BIT(25)
+#define I40E_FLAG_CLIENT_RESET			BIT(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED	BIT(28)
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
......
@@ -1771,9 +1771,10 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_25GBASE_CR = 0x20,
 	I40E_PHY_TYPE_25GBASE_SR = 0x21,
 	I40E_PHY_TYPE_25GBASE_LR = 0x22,
+	I40E_PHY_TYPE_MAX,
+	I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
 	I40E_PHY_TYPE_EMPTY = 0xFE,
 	I40E_PHY_TYPE_DEFAULT = 0xFF,
-	I40E_PHY_TYPE_MAX
 };
 #define I40E_LINK_SPEED_100MB_SHIFT 0x1
......
@@ -278,8 +278,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 rx_ring->netdev,
 			 rx_ring->rx_bi);
 		dev_info(&pf->pdev->dev,
-			 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-			 i, rx_ring->state,
+			 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+			 i, *rx_ring->state,
 			 rx_ring->queue_index,
 			 rx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
@@ -334,8 +334,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 tx_ring->netdev,
 			 tx_ring->tx_bi);
 		dev_info(&pf->pdev->dev,
-			 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-			 i, tx_ring->state,
+			 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+			 i, *tx_ring->state,
 			 tx_ring->queue_index,
 			 tx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
......
@@ -227,6 +227,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
 	I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
 	I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
 	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+	I40E_PRIV_FLAG("disable-source-pruning",
+		       I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
 };
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -2008,7 +2010,9 @@ static int i40e_set_phys_id(struct net_device *netdev,
 		if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
 			pf->led_status = i40e_led_get(hw);
 		} else {
-			i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
+			if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+				i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
+						      NULL);
 			ret = i40e_led_get_phy(hw, &temp_status,
 					       &pf->phy_led_val);
 			pf->led_status = temp_status;
@@ -2033,7 +2037,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
 		ret = i40e_led_set_phy(hw, false, pf->led_status,
 				       (pf->phy_led_val |
 					I40E_PHY_LED_MODE_ORIG));
-		i40e_aq_set_phy_debug(hw, 0, NULL);
+		if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+			i40e_aq_set_phy_debug(hw, 0, NULL);
 		}
 		break;
 	default:
@@ -4090,7 +4095,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	u64 orig_flags, new_flags, changed_flags;
+	u32 orig_flags, new_flags, changed_flags;
 	u32 i, j;
 	orig_flags = READ_ONCE(pf->flags);
@@ -4142,12 +4147,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 		return -EOPNOTSUPP;
 	/* Compare and exchange the new flags into place. If we failed, that
-	 * is if cmpxchg64 returns anything but the old value, this means that
+	 * is if cmpxchg returns anything but the old value, this means that
 	 * something else has modified the flags variable since we copied it
 	 * originally. We'll just punt with an error and log something in the
 	 * message buffer.
 	 */
-	if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
+	if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
 		dev_warn(&pf->pdev->dev,
 			 "Unable to update pf->flags as it was modified by another thread...\n");
 		return -EAGAIN;
@@ -4189,8 +4194,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	/* Issue reset to cause things to take effect, as additional bits
 	 * are added we will need to create a mask of bits requiring reset
 	 */
-	if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
-	    ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+	if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+			     I40E_FLAG_LEGACY_RX |
+			     I40E_FLAG_SOURCE_PRUNING_DISABLED))
 		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 	return 0;
......
@@ -1776,11 +1776,6 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
 	}
-
-	/* schedule our worker thread which will take care of
-	 * applying the new filter changes
-	 */
-	i40e_service_event_schedule(vsi->back);
 }
 /**
@@ -2885,14 +2880,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 {
 	struct i40e_vsi *vsi = ring->vsi;
+	int cpu;
 	if (!ring->q_vector || !ring->netdev)
 		return;
 	if ((vsi->tc_config.numtc <= 1) &&
-	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
-		netif_set_xps_queue(ring->netdev,
-				    get_cpu_mask(ring->q_vector->v_idx),
+	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) {
+		cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+		netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
 				    ring->queue_index);
 	}
@@ -3009,7 +3005,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	struct i40e_hmc_obj_rxq rx_ctx;
 	i40e_status err = 0;
-	ring->state = 0;
+	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 	/* clear the context structure first */
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
@@ -3482,6 +3478,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	int tx_int_idx = 0;
 	int vector, err;
 	int irq_num;
+	int cpu;
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
@@ -3517,10 +3514,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-		/* get_cpu_mask returns a static constant mask with
-		 * a permanent lifetime so it's ok to use here.
+		/* Spread affinity hints out across online CPUs.
+		 *
+		 * get_cpu_mask returns a static constant mask with
+		 * a permanent lifetime so it's ok to pass to
+		 * irq_set_affinity_hint without making a copy.
 		 */
-		irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 	vsi->irqs_ready = true;
@@ -6231,6 +6232,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 			hlist_del(&filter->fdir_node);
 			kfree(filter);
 			pf->fdir_pf_active_filters--;
+			pf->fd_inv = 0;
 		}
 	}
 }
@@ -6557,12 +6559,26 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
 	 */
 	i40e_link_event(pf);
-	/* check for unqualified module, if link is down */
-	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
-	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
-	    (!(status->link_info & I40E_AQ_LINK_UP)))
-		dev_err(&pf->pdev->dev,
-			"The driver failed to link because an unqualified module was detected.\n");
+	/* Check if module meets thermal requirements */
+	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
+		dev_err(&pf->pdev->dev,
+			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
+		dev_err(&pf->pdev->dev,
+			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+	} else {
+		/* check for unqualified module, if link is down, suppress
+		 * the message if link was forced to be down.
+		 */
+		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
+		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+			dev_err(&pf->pdev->dev,
+				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
+			dev_err(&pf->pdev->dev,
+				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+		}
+	}
 }
 /**
@@ -9068,6 +9084,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	     (pf->hw.aq.fw_maj_ver >= 5)))
 		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
+	/* Enable PTP L4 if FW > v6.0 */
+	if (pf->hw.mac.type == I40E_MAC_XL710 &&
+	    pf->hw.aq.fw_maj_ver >= 6)
+		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+
 	if (pf->hw.func_caps.vmdq) {
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -9903,6 +9924,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		enabled_tc = i40e_pf_get_tc_map(pf);
+		/* Source pruning is enabled by default, so the flag is
+		 * negative logic - if it's set, we need to fiddle with
+		 * the VSI to disable source pruning.
+		 */
+		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+			memset(&ctxt, 0, sizeof(ctxt));
+			ctxt.seid = pf->main_vsi_seid;
+			ctxt.pf_num = pf->hw.pf_id;
+			ctxt.vf_num = 0;
+			ctxt.info.valid_sections |=
+				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+			ctxt.info.switch_id =
+				     cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "update vsi failed, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						     pf->hw.aq.asq_last_status));
+				ret = -ENOENT;
+				goto err;
+			}
+		}
+
 		/* MFP mode setup queue map and update VSI */
 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
@@ -11999,6 +12045,28 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
 	return result;
 }
+/**
+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	i40e_prep_for_reset(pf, false);
+}
+
+/**
+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	i40e_reset_and_rebuild(pf, false, false);
+}
+
 /**
  * i40e_pci_error_resume - restart operations after PCI error recovery
  * @pdev: PCI device information struct
@@ -12189,6 +12257,8 @@ static int i40e_resume(struct device *dev)
 static const struct pci_error_handlers i40e_err_handler = {
 	.error_detected = i40e_pci_error_detected,
 	.slot_reset = i40e_pci_error_slot_reset,
+	.reset_prepare = i40e_pci_error_reset_prepare,
+	.reset_done = i40e_pci_error_reset_done,
 	.resume = i40e_pci_error_resume,
 };
......
@@ -2794,7 +2794,7 @@
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
 #define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
 #define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
......
@@ -3167,38 +3167,12 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	/* write last descriptor with EOP bit */
 	td_cmd |= I40E_TX_DESC_CMD_EOP;
-	/* We can OR these values together as they both are checked against
-	 * 4 below and at this point desc_count will be used as a boolean value
-	 * after this if/else block.
+	/* We OR these values together to check both against 4 (WB_STRIDE)
+	 * below. This is safe since we don't re-use desc_count afterwards.
 	 */
 	desc_count |= ++tx_ring->packet_stride;
-	/* Algorithm to optimize tail and RS bit setting:
-	 * if queue is stopped
-	 *	mark RS bit
-	 *	reset packet counter
-	 * else if xmit_more is supported and is true
-	 *	advance packet counter to 4
-	 *	reset desc_count to 0
-	 *
-	 * if desc_count >= 4
-	 *	mark RS bit
-	 *	reset packet counter
-	 * if desc_count > 0
-	 *	update tail
-	 *
-	 * Note: If there are less than 4 descriptors
-	 * pending and interrupts were disabled the service task will
-	 * trigger a force WB.
-	 */
-	if (netif_xmit_stopped(txring_txq(tx_ring))) {
-		goto do_rs;
-	} else if (skb->xmit_more) {
-		/* set stride to arm on next packet and reset desc_count */
-		tx_ring->packet_stride = WB_STRIDE;
-		desc_count = 0;
-	} else if (desc_count >= WB_STRIDE) {
-do_rs:
+	if (desc_count >= WB_STRIDE) {
 		/* write last descriptor with RS bit set */
 		td_cmd |= I40E_TX_DESC_CMD_RS;
 		tx_ring->packet_stride = 0;
@@ -3219,7 +3193,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	first->next_to_watch = tx_desc;
 	/* notify HW of packet */
-	if (desc_count) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
 		/* we need this if more than one processor can write to our tail
......
@@ -342,6 +342,7 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
+	__I40E_RING_STATE_NBITS /* must be last */
 };
 /* some useful defines for virtchannel interface, which
@@ -366,7 +367,7 @@ struct i40e_ring {
 		struct i40e_tx_buffer *tx_bi;
 		struct i40e_rx_buffer *rx_bi;
 	};
-	unsigned long state;
+	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
 	u16 queue_index; /* Queue number of ring */
 	u8 dcb_tc; /* Traffic class of ring */
 	u8 __iomem *tail;
......
@@ -1767,9 +1767,10 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_25GBASE_CR = 0x20,
 	I40E_PHY_TYPE_25GBASE_SR = 0x21,
 	I40E_PHY_TYPE_25GBASE_LR = 0x22,
+	I40E_PHY_TYPE_MAX,
+	I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
 	I40E_PHY_TYPE_EMPTY = 0xFE,
 	I40E_PHY_TYPE_DEFAULT = 0xFF,
-	I40E_PHY_TYPE_MAX
 };
 #define I40E_LINK_SPEED_100MB_SHIFT 0x1
......
@@ -325,6 +325,7 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
+	__I40E_RING_STATE_NBITS /* must be last */
 };
 /* some useful defines for virtchannel interface, which
@@ -348,7 +349,7 @@ struct i40e_ring {
 		struct i40e_tx_buffer *tx_bi;
 		struct i40e_rx_buffer *rx_bi;
 	};
-	unsigned long state;
+	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
 	u16 queue_index; /* Queue number of ring */
 	u8 dcb_tc; /* Traffic class of ring */
 	u8 __iomem *tail;
......
@@ -222,22 +222,22 @@ struct i40evf_adapter {
 	u32 flags;
 #define I40EVF_FLAG_RX_CSUM_ENABLED		BIT(0)
-#define I40EVF_FLAG_IMIR_ENABLED		BIT(5)
-#define I40EVF_FLAG_MQ_CAPABLE			BIT(6)
-#define I40EVF_FLAG_PF_COMMS_FAILED		BIT(8)
-#define I40EVF_FLAG_RESET_PENDING		BIT(9)
-#define I40EVF_FLAG_RESET_NEEDED		BIT(10)
-#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(11)
-#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(12)
-#define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(13)
-#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED	BIT(14)
-#define I40EVF_FLAG_CLIENT_NEEDS_OPEN		BIT(15)
-#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE		BIT(16)
-#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(17)
-#define I40EVF_FLAG_PROMISC_ON			BIT(18)
-#define I40EVF_FLAG_ALLMULTI_ON			BIT(19)
-#define I40EVF_FLAG_LEGACY_RX			BIT(20)
-#define I40EVF_FLAG_REINIT_ITR_NEEDED		BIT(21)
+#define I40EVF_FLAG_IMIR_ENABLED		BIT(1)
+#define I40EVF_FLAG_MQ_CAPABLE			BIT(2)
+#define I40EVF_FLAG_PF_COMMS_FAILED		BIT(3)
+#define I40EVF_FLAG_RESET_PENDING		BIT(4)
+#define I40EVF_FLAG_RESET_NEEDED		BIT(5)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(6)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(7)
+#define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(8)
+#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED	BIT(9)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN		BIT(10)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE		BIT(11)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(12)
+#define I40EVF_FLAG_PROMISC_ON			BIT(13)
+#define I40EVF_FLAG_ALLMULTI_ON			BIT(14)
+#define I40EVF_FLAG_LEGACY_RX			BIT(15)
+#define I40EVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
 /* duplicates for common code */
 #define I40E_FLAG_DCB_ENABLED			0
 #define I40E_FLAG_RX_CSUM_ENABLED		I40EVF_FLAG_RX_CSUM_ENABLED
......
@@ -515,6 +515,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
 	unsigned int vector, q_vectors;
 	unsigned int rx_int_idx = 0, tx_int_idx = 0;
 	int irq_num, err;
+	int cpu;
 	i40evf_irq_disable(adapter);
 	/* Decrement for Other and TCP Timer vectors */
@@ -553,10 +554,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
 		q_vector->affinity_notify.release =
 						   i40evf_irq_affinity_release;
 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-		/* get_cpu_mask returns a static constant mask with
-		 * a permanent lifetime so it's ok to use here.
+		/* Spread the IRQ affinity hints across online CPUs. Note that
+		 * get_cpu_mask returns a mask with a permanent lifetime so
+		 * it's safe to use as a hint for irq_set_affinity_hint.
 		 */
-		irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 	return 0;
@@ -2420,10 +2423,6 @@ static netdev_features_t i40evf_features_check(struct sk_buff *skb,
 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
-#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
-			      NETIF_F_HW_VLAN_CTAG_RX |\
-			      NETIF_F_HW_VLAN_CTAG_FILTER)
-
 /**
  * i40evf_fix_features - fix up the netdev feature bits
  * @netdev: our net device
@@ -2436,9 +2435,11 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	features &= ~I40EVF_VLAN_FEATURES;
-	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
-		features |= I40EVF_VLAN_FEATURES;
+	if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+			      NETIF_F_HW_VLAN_CTAG_RX |
+			      NETIF_F_HW_VLAN_CTAG_FILTER);
 	return features;
 }
@@ -2569,9 +2570,17 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
 	 */
 	hw_features = hw_enc_features;
+	/* Enable VLAN features if supported */
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_CTAG_RX);
+
 	netdev->hw_features |= hw_features;
-	netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
+	netdev->features |= hw_features;
+
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	adapter->vsi.id = adapter->vsi_res->vsi_id;
......