Commit 739cb499 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-03-16 (igb, igbvf, igc)

This series contains updates to igb, igbvf, and igc drivers.

Lin Ma removes rtnl_lock() when disabling SRIOV on remove which was
causing deadlock on igb.

Akihiko Odaki delays enabling of SRIOV on igb to prevent early messages
that could get ignored, and clears the MAC address when the PF returns a
nack on reset, indicating that no MAC address was assigned for igbvf.

Gaosheng Cui frees IRQs in error path for igbvf.

Akashi Takahiro fixes logic on checking TAPRIO gate support for igc.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e8d20c3d 2b4cc3d3
...@@ -109,6 +109,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *); ...@@ -109,6 +109,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *); static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev); static void igb_remove(struct pci_dev *pdev);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *); static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *); int igb_open(struct net_device *);
int igb_close(struct net_device *); int igb_close(struct net_device *);
...@@ -175,9 +176,7 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter); ...@@ -175,9 +176,7 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf); static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif #endif
static int igb_suspend(struct device *); static int igb_suspend(struct device *);
...@@ -3665,7 +3664,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3665,7 +3664,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
kfree(adapter->shadow_vfta); kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter); igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
igb_disable_sriov(pdev); igb_disable_sriov(pdev, false);
#endif #endif
pci_iounmap(pdev, adapter->io_addr); pci_iounmap(pdev, adapter->io_addr);
err_ioremap: err_ioremap:
...@@ -3679,7 +3678,38 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3679,7 +3678,38 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
static int igb_disable_sriov(struct pci_dev *pdev) static int igb_sriov_reinit(struct pci_dev *dev)
{
struct net_device *netdev = pci_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
rtnl_lock();
if (netif_running(netdev))
igb_close(netdev);
else
igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);
igb_init_queue_configuration(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
rtnl_unlock();
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
if (netif_running(netdev))
igb_open(netdev);
rtnl_unlock();
return 0;
}
static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
...@@ -3713,10 +3743,10 @@ static int igb_disable_sriov(struct pci_dev *pdev) ...@@ -3713,10 +3743,10 @@ static int igb_disable_sriov(struct pci_dev *pdev)
adapter->flags |= IGB_FLAG_DMAC; adapter->flags |= IGB_FLAG_DMAC;
} }
return 0; return reinit ? igb_sriov_reinit(pdev) : 0;
} }
static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
...@@ -3781,12 +3811,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) ...@@ -3781,12 +3811,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
"Unable to allocate memory for VF MAC filter list\n"); "Unable to allocate memory for VF MAC filter list\n");
} }
/* only call pci_enable_sriov() if no VFs are allocated already */
if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
if (err)
goto err_out;
}
dev_info(&pdev->dev, "%d VFs allocated\n", dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count); adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++) for (i = 0; i < adapter->vfs_allocated_count; i++)
...@@ -3794,6 +3818,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) ...@@ -3794,6 +3818,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* DMA Coalescing is not supported in IOV mode. */ /* DMA Coalescing is not supported in IOV mode. */
adapter->flags &= ~IGB_FLAG_DMAC; adapter->flags &= ~IGB_FLAG_DMAC;
if (reinit) {
err = igb_sriov_reinit(pdev);
if (err)
goto err_out;
}
/* only call pci_enable_sriov() if no VFs are allocated already */
if (!old_vfs)
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
goto out; goto out;
err_out: err_out:
...@@ -3863,9 +3898,7 @@ static void igb_remove(struct pci_dev *pdev) ...@@ -3863,9 +3898,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter); igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
rtnl_lock(); igb_disable_sriov(pdev, false);
igb_disable_sriov(pdev);
rtnl_unlock();
#endif #endif
unregister_netdev(netdev); unregister_netdev(netdev);
...@@ -3911,7 +3944,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter) ...@@ -3911,7 +3944,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
igb_reset_interrupt_capability(adapter); igb_reset_interrupt_capability(adapter);
pci_sriov_set_totalvfs(pdev, 7); pci_sriov_set_totalvfs(pdev, 7);
igb_enable_sriov(pdev, max_vfs); igb_enable_sriov(pdev, max_vfs, false);
#endif /* CONFIG_PCI_IOV */ #endif /* CONFIG_PCI_IOV */
} }
...@@ -9520,71 +9553,17 @@ static void igb_shutdown(struct pci_dev *pdev) ...@@ -9520,71 +9553,17 @@ static void igb_shutdown(struct pci_dev *pdev)
} }
} }
#ifdef CONFIG_PCI_IOV
static int igb_sriov_reinit(struct pci_dev *dev)
{
struct net_device *netdev = pci_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
rtnl_lock();
if (netif_running(netdev))
igb_close(netdev);
else
igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);
igb_init_queue_configuration(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
rtnl_unlock();
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
if (netif_running(netdev))
igb_open(netdev);
rtnl_unlock();
return 0;
}
static int igb_pci_disable_sriov(struct pci_dev *dev)
{
int err = igb_disable_sriov(dev);
if (!err)
err = igb_sriov_reinit(dev);
return err;
}
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
{
int err = igb_enable_sriov(dev, num_vfs);
if (err)
goto out;
err = igb_sriov_reinit(dev);
if (!err)
return num_vfs;
out:
return err;
}
#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{ {
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
if (num_vfs == 0) int err;
return igb_pci_disable_sriov(dev);
else if (num_vfs == 0) {
return igb_pci_enable_sriov(dev, num_vfs); return igb_disable_sriov(dev, true);
} else {
err = igb_enable_sriov(dev, num_vfs, true);
return err ? err : num_vfs;
}
#endif #endif
return 0; return 0;
} }
......
...@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) ...@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name, igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev); netdev);
if (err) if (err)
goto out; goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector); adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr; adapter->rx_ring->itr_val = adapter->current_itr;
...@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) ...@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector, err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev); igbvf_msix_other, 0, netdev->name, netdev);
if (err) if (err)
goto out; goto free_irq_rx;
igbvf_configure_msix(adapter); igbvf_configure_msix(adapter);
return 0; return 0;
free_irq_rx:
free_irq(adapter->msix_entries[--vector].vector, netdev);
free_irq_tx:
free_irq(adapter->msix_entries[--vector].vector, netdev);
out: out:
return err; return err;
} }
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */ /* Copyright(c) 2009 - 2018 Intel Corporation. */
#include <linux/etherdevice.h>
#include "vf.h" #include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw); static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
...@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) ...@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */ /* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3); ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) { if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | switch (msgbuf[0]) {
E1000_VT_MSGTYPE_ACK)) case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN); memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
else break;
case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
eth_zero_addr(hw->mac.perm_addr);
break;
default:
ret_val = -E1000_ERR_MAC_INIT; ret_val = -E1000_ERR_MAC_INIT;
}
} }
} }
......
...@@ -6010,18 +6010,18 @@ static bool validate_schedule(struct igc_adapter *adapter, ...@@ -6010,18 +6010,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES) if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false; return false;
for (i = 0; i < adapter->num_tx_queues; i++) { for (i = 0; i < adapter->num_tx_queues; i++)
if (e->gate_mask & BIT(i)) if (e->gate_mask & BIT(i)) {
queue_uses[i]++; queue_uses[i]++;
/* There are limitations: A single queue cannot be /* There are limitations: A single queue cannot
* opened and closed multiple times per cycle unless the * be opened and closed multiple times per cycle
* gate stays open. Check for it. * unless the gate stays open. Check for it.
*/ */
if (queue_uses[i] > 1 && if (queue_uses[i] > 1 &&
!(prev->gate_mask & BIT(i))) !(prev->gate_mask & BIT(i)))
return false; return false;
} }
} }
return true; return true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment