Commit d1aaaa2e authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-09-09 (ice, igb)

This series contains updates to ice and igb drivers.

Martyna moves LLDP rule removal to the proper uninitialization function
for ice.

Jake corrects accounting logic for FWD_TO_VSI_LIST switch filters on
ice.

Przemek removes incorrect, explicit calls to pci_disable_device() for
ice.

Michal Schmidt stops incorrect use of VSI list for VLAN use on ice.

Sriram Yagnaraman adjusts igb_xdp_ring_update_tail() to be called under
Tx lock on igb.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  igb: Always call igb_xdp_ring_update_tail() under Tx lock
  ice: fix VSI lists confusion when adding VLANs
  ice: stop calling pci_disable_device() as we use pcim
  ice: fix accounting for filters shared by multiple VSIs
  ice: Fix lldp packets dropping after changing the number of channels
====================

Link: https://patch.msgid.link/20240909203842.3109822-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3d731dc9 27717f8b
...@@ -2413,13 +2413,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) ...@@ -2413,13 +2413,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
int err; int err;
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err) if (err)
...@@ -2764,6 +2757,14 @@ int ice_vsi_release(struct ice_vsi *vsi) ...@@ -2764,6 +2757,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_rss_clean(vsi); ice_rss_clean(vsi);
ice_vsi_close(vsi); ice_vsi_close(vsi);
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
ice_vsi_decfg(vsi); ice_vsi_decfg(vsi);
/* retain SW VSI data structure since it is needed to unregister and /* retain SW VSI data structure since it is needed to unregister and
......
...@@ -5363,7 +5363,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ...@@ -5363,7 +5363,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_deinit(pf); ice_deinit(pf);
err_init: err_init:
ice_adapter_put(pdev); ice_adapter_put(pdev);
pci_disable_device(pdev);
return err; return err;
} }
...@@ -5470,7 +5469,6 @@ static void ice_remove(struct pci_dev *pdev) ...@@ -5470,7 +5469,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_set_wake(pf); ice_set_wake(pf);
ice_adapter_put(pdev); ice_adapter_put(pdev);
pci_disable_device(pdev);
} }
/** /**
......
...@@ -3194,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, ...@@ -3194,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */ /* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0; return -EEXIST;
/* Update the previously created VSI list set with /* Update the previously created VSI list set with
* the new VSI ID passed in * the new VSI ID passed in
...@@ -3264,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, ...@@ -3264,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules; list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) { list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->vsi_list_info) { if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info; map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) { if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id; *vsi_list_id = map_info->vsi_list_id;
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/bpf_trace.h> #include <linux/bpf_trace.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA #ifdef CONFIG_IGB_DCA
#include <linux/dca.h> #include <linux/dca.h>
#endif #endif
...@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) ...@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
} }
} }
/* This function assumes __netif_tx_lock is held by the caller. */
static void igb_xdp_ring_update_tail(struct igb_ring *ring) static void igb_xdp_ring_update_tail(struct igb_ring *ring)
{ {
lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
/* Force memory writes to complete before letting h/w know there /* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. * are new descriptors to fetch.
*/ */
...@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n, ...@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nxmit++; nxmit++;
} }
__netif_tx_unlock(nq);
if (unlikely(flags & XDP_XMIT_FLUSH)) if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring); igb_xdp_ring_update_tail(tx_ring);
__netif_tx_unlock(nq);
return nxmit; return nxmit;
} }
...@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring, ...@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{ {
unsigned int total_bytes = 0, total_packets = 0;
struct igb_adapter *adapter = q_vector->adapter; struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring; struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring); u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
int cpu = smp_processor_id();
unsigned int xdp_xmit = 0; unsigned int xdp_xmit = 0;
struct netdev_queue *nq;
struct xdp_buff xdp; struct xdp_buff xdp;
u32 frame_sz = 0; u32 frame_sz = 0;
int rx_buf_pgcnt; int rx_buf_pgcnt;
...@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ...@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (xdp_xmit & IGB_XDP_TX) { if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
igb_xdp_ring_update_tail(tx_ring); igb_xdp_ring_update_tail(tx_ring);
__netif_tx_unlock(nq);
} }
u64_stats_update_begin(&rx_ring->rx_syncp); u64_stats_update_begin(&rx_ring->rx_syncp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment