Commit c0a3665f authored by Usha Ketineni, committed by Jeff Kirsher

ice: Fix to change Rx/Tx ring descriptor size via ethtool with DCBx

This patch fixes the call trace caused by the kernel when the Rx/Tx
descriptor size change request is initiated via ethtool when DCB is
configured. ice_set_ringparam() should use vsi->num_txq instead of
vsi->alloc_txq as it represents the queues that are enabled in the
driver when DCB is enabled/disabled. Otherwise, queue index being
used can go out of range.

For example, when vsi->alloc_txq has 104 queues and with 3 TCS enabled
via DCB, each TC gets 34 queues, vsi->num_txq will be 102 and only 102
queues will be enabled.
Signed-off-by: Usha Ketineni <usha.k.ketineni@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 5f8cc355
@@ -2654,14 +2654,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
 		    vsi->tx_rings[0]->count, new_tx_cnt);
-	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
 				sizeof(*tx_rings), GFP_KERNEL);
 	if (!tx_rings) {
 		err = -ENOMEM;
 		goto done;
 	}
-	for (i = 0; i < vsi->alloc_txq; i++) {
+	ice_for_each_txq(vsi, i) {
 		/* clone ring and setup updated count */
 		tx_rings[i] = *vsi->tx_rings[i];
 		tx_rings[i].count = new_tx_cnt;
@@ -2714,14 +2714,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
 		    vsi->rx_rings[0]->count, new_rx_cnt);
-	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
 				sizeof(*rx_rings), GFP_KERNEL);
 	if (!rx_rings) {
 		err = -ENOMEM;
 		goto done;
 	}
-	for (i = 0; i < vsi->alloc_rxq; i++) {
+	ice_for_each_rxq(vsi, i) {
 		/* clone ring and setup updated count */
 		rx_rings[i] = *vsi->rx_rings[i];
 		rx_rings[i].count = new_rx_cnt;
@@ -2759,7 +2759,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	ice_down(vsi);
 
 	if (tx_rings) {
-		for (i = 0; i < vsi->alloc_txq; i++) {
+		ice_for_each_txq(vsi, i) {
 			ice_free_tx_ring(vsi->tx_rings[i]);
 			*vsi->tx_rings[i] = tx_rings[i];
 		}
@@ -2767,7 +2767,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	}
 
 	if (rx_rings) {
-		for (i = 0; i < vsi->alloc_rxq; i++) {
+		ice_for_each_rxq(vsi, i) {
 			ice_free_rx_ring(vsi->rx_rings[i]);
 			/* copy the real tail offset */
 			rx_rings[i].tail = vsi->rx_rings[i]->tail;
@@ -2801,7 +2801,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 free_tx:
 	/* error cleanup if the Rx allocations failed after getting Tx */
 	if (tx_rings) {
-		for (i = 0; i < vsi->alloc_txq; i++)
+		ice_for_each_txq(vsi, i)
 			ice_free_tx_ring(&tx_rings[i]);
 		devm_kfree(&pf->pdev->dev, tx_rings);
 	}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment