Commit 16be45bc authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Do not manipulate macvlan Tx queues when performing macvlan offload

We should not be stopping/starting the upper device's Tx queues when
handling a macvlan offload. Instead we should be stopping and starting
traffic on our own queues.

In order to prevent this I am updating the code so that we no longer
change the queue configuration on the upper device, nor update the
queue_index on our own rings. Instead we can just use the queue index
for our local device and leave the netdev unchanged in the case of the
transmit rings.
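
To illustrate the selection logic this moves us to, here is a minimal,
self-contained userspace sketch of the arithmetic now used in
ixgbe_select_queue(): the flow hash is scaled into the pool's own queue
range on the lower device instead of trusting a skb->queue_mapping chosen
against the macvlan. The reciprocal_scale() copy mirrors the kernel's
definition; the queue counts and hash value are made-up example inputs,
not values taken from the driver.

  #include <stdint.h>
  #include <stdio.h>

  /* same arithmetic as the kernel's reciprocal_scale() */
  static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
  {
  	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
  }

  int main(void)
  {
  	uint32_t num_rx_queues_per_pool = 4;	/* queues owned by one pool */
  	uint32_t tx_base_queue = 8;		/* pool's first queue on the PF */
  	uint32_t flow_hash = 0x9e3779b9;	/* stands in for skb_get_hash() */

  	/* scale the hash into [0, num_rx_queues_per_pool) ... */
  	uint32_t txq = reciprocal_scale(flow_hash, num_rx_queues_per_pool);

  	/* ... so the chosen ring is always one of the pool's own queues */
  	printf("tx queue = %u\n", txq + tx_base_queue);
  	return 0;
  }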
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 58918df0
@@ -920,11 +920,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 
 		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
-		if (adapter->num_rx_pools > 1)
-			ring->queue_index =
-				txr_idx % adapter->num_rx_queues_per_pool;
-		else
-			ring->queue_index = txr_idx;
+		ring->queue_index = txr_idx;
 
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
@@ -994,11 +990,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
 		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
-		if (adapter->num_rx_pools > 1)
-			ring->queue_index =
-				rxr_idx % adapter->num_rx_queues_per_pool;
-		else
-			ring->queue_index = rxr_idx;
+		ring->queue_index = rxr_idx;
 
 		/* assign ring to adapter */
 		adapter->rx_ring[rxr_idx] = ring;
...
@@ -5341,12 +5341,11 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
 				    struct ixgbe_ring *rx_ring)
 {
 	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int index = rx_ring->queue_index + vadapter->rx_base_queue;
 
 	/* shutdown specific queue receive and wait for dma to settle */
 	ixgbe_disable_rx_queue(adapter, rx_ring);
 	usleep_range(10000, 20000);
-	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
+	ixgbe_irq_disable_queues(adapter, BIT_ULL(rx_ring->queue_index));
 	ixgbe_clean_rx_ring(rx_ring);
 }
@@ -5355,20 +5354,13 @@ static int ixgbe_fwd_ring_down(struct net_device *vdev,
 			       struct ixgbe_fwd_adapter *accel)
 {
 	struct ixgbe_adapter *adapter = accel->real_adapter;
 	unsigned int rxbase = accel->rx_base_queue;
-	unsigned int txbase = accel->tx_base_queue;
 	int i;
 
-	netif_tx_stop_all_queues(vdev);
-
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
 		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
 		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
-
 	return 0;
 }
@@ -5376,8 +5368,7 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 			     struct ixgbe_fwd_adapter *accel)
 {
 	struct ixgbe_adapter *adapter = accel->real_adapter;
-	unsigned int rxbase, txbase, queues;
-	int i, baseq, err = 0;
+	int i, baseq, err;
 
 	if (!test_bit(accel->pool, adapter->fwd_bitmask))
 		return 0;
@@ -5388,30 +5379,17 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 		   baseq, baseq + adapter->num_rx_queues_per_pool);
 
 	accel->netdev = vdev;
-	accel->rx_base_queue = rxbase = baseq;
-	accel->tx_base_queue = txbase = baseq;
+	accel->rx_base_queue = baseq;
+	accel->tx_base_queue = baseq;
 
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[baseq + i]);
 
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->rx_ring[rxbase + i]->netdev = vdev;
-		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
+		adapter->rx_ring[baseq + i]->netdev = vdev;
+		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[baseq + i]);
 	}
 
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-		adapter->tx_ring[txbase + i]->netdev = vdev;
-
-	queues = min_t(unsigned int,
-		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
-	err = netif_set_real_num_tx_queues(vdev, queues);
-	if (err)
-		goto fwd_queue_err;
-
-	err = netif_set_real_num_rx_queues(vdev, queues);
-	if (err)
-		goto fwd_queue_err;
-
 	/* ixgbe_add_mac_filter will return an index if it succeeds, so we
 	 * need to only treat it as an error value if it is negative.
 	 */
@@ -5899,21 +5877,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
 	spin_unlock(&adapter->fdir_perfect_lock);
 }
 
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
-	if (netif_is_macvlan(upper)) {
-		struct macvlan_dev *vlan = netdev_priv(upper);
-
-		if (vlan->fwd_priv) {
-			netif_tx_stop_all_queues(upper);
-			netif_carrier_off(upper);
-			netif_tx_disable(upper);
-		}
-	}
-
-	return 0;
-}
-
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -5943,10 +5906,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
-	/* disable any upper devices */
-	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_disable_macvlan, NULL);
-
 	ixgbe_irq_disable(adapter);
 	ixgbe_napi_disable_all(adapter);
@@ -7262,18 +7221,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
 #endif
 }
 
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
-	if (netif_is_macvlan(upper)) {
-		struct macvlan_dev *vlan = netdev_priv(upper);
-
-		if (vlan->fwd_priv)
-			netif_tx_wake_all_queues(upper);
-	}
-
-	return 0;
-}
-
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -7354,12 +7301,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	/* enable transmits */
 	netif_tx_wake_all_queues(adapter->netdev);
 
-	/* enable any upper devices */
-	rtnl_lock();
-	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_enable_macvlan, NULL);
-	rtnl_unlock();
-
 	/* update the default user priority for VFs */
 	ixgbe_update_default_up(adapter);
@@ -8320,14 +8261,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 			      void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
 	struct ixgbe_adapter *adapter;
-	struct ixgbe_ring_feature *f;
 	int txq;
+#ifdef IXGBE_FCOE
+	struct ixgbe_ring_feature *f;
 #endif
 
-	if (fwd_adapter)
-		return skb->queue_mapping + fwd_adapter->tx_base_queue;
+	if (fwd_adapter) {
+		adapter = netdev_priv(dev);
+		txq = reciprocal_scale(skb_get_hash(skb),
+				       adapter->num_rx_queues_per_pool);
+
+		return txq + fwd_adapter->tx_base_queue;
+	}
 
 #ifdef IXGBE_FCOE
@@ -9816,22 +9762,6 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
 		return ERR_PTR(-EINVAL);
 
-#ifdef CONFIG_RPS
-	if (vdev->num_rx_queues != vdev->num_tx_queues) {
-		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
-			    vdev->name);
-		return ERR_PTR(-EINVAL);
-	}
-#endif
-	/* Check for hardware restriction on number of rx/tx queues */
-	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
-	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
-		netdev_info(pdev,
-			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
-			    pdev->name);
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
 	      adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
 	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
@@ -9848,24 +9778,19 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
 	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
 
-	/* Force reinit of ring allocation with VMDQ enabled */
-	err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
-
-	if (err)
-		goto fwd_add_err;
 	fwd_adapter->pool = pool;
 	fwd_adapter->real_adapter = adapter;
 
-	if (netif_running(pdev)) {
+	/* Force reinit of ring allocation with VMDQ enabled */
+	err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+	if (!err && netif_running(pdev))
 		err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
-		if (err)
-			goto fwd_add_err;
-		netif_tx_start_all_queues(vdev);
-	}
 
-	return fwd_adapter;
-fwd_add_err:
+	if (!err)
+		return fwd_adapter;
+
 	/* unwind counter and free adapter struct */
 	netdev_info(pdev,
 		    "%s: dfwd hardware acceleration failed\n", vdev->name);
...