Commit 4a0b9ca0 authored by PJ Waskiewicz, committed by David S. Miller

ixgbe: Make descriptor ring allocations NUMA-aware

This patch allocates the ring structures themselves on each
NUMA node along with the buffer_info structures.  This way we
don't allocate the entire ring memory on a single node in one
big block, thus reducing NUMA node memory crosstalk.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1a6c14a2
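
The allocation change that actually places each ring on its node lives in the part of the diff that is collapsed below; the hunks shown here are the fallout of turning tx_ring/rx_ring into arrays of per-queue pointers. As a rough, hypothetical sketch of the per-node allocation pattern the commit message describes (the helper name is invented; only kzalloc_node()/kzalloc() and the new numa_node field come from the kernel API and this patch):

#include <linux/slab.h>

/* Hypothetical helper: allocate one ring structure on the NUMA node
 * that will service the queue, instead of carving every ring out of
 * one big block on a single node. */
static struct ixgbe_ring *ixgbe_alloc_ring_on_node(int node)
{
	struct ixgbe_ring *ring;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring)
		/* fall back to any node rather than failing outright */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (ring)
		ring->numa_node = node;	/* field introduced by this patch */
	return ring;
}

Descriptor and buffer_info memory for each ring can then be placed with node-aware allocators such as vmalloc_node(), keyed off ring->numa_node.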
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -175,6 +175,7 @@ struct ixgbe_ring {
 	struct ixgbe_queue_stats stats;
 	unsigned long reinit_state;
+	int numa_node;
 	u64 rsc_count;			/* stat for coalesced packets */
 	u64 rsc_flush;			/* stats for flushed packets */
 	u32 restart_queue;		/* track tx queue restarts */
@@ -293,7 +294,7 @@ struct ixgbe_adapter {
 	u16 eitr_high;
 
 	/* TX */
-	struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_tx_queues;
 	u32 tx_timeout_count;
 	bool detect_tx_hung;
@@ -302,7 +303,7 @@ struct ixgbe_adapter {
 	u64 lsc_int;
 
 	/* RX */
-	struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_rx_queues;
 	int num_rx_pools;		/* == num_rx_queues in 82598 */
 	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -834,8 +834,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
 				struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *tx_ring = adapter->tx_ring;
-	struct ixgbe_ring *rx_ring = adapter->rx_ring;
+	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 
 	ring->rx_max_pending = IXGBE_MAX_RXD;
 	ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +867,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if ((new_tx_count == adapter->tx_ring->count) &&
-	    (new_rx_count == adapter->rx_ring->count)) {
+	if ((new_tx_count == adapter->tx_ring[0]->count) &&
+	    (new_rx_count == adapter->rx_ring[0]->count)) {
 		/* nothing to do */
 		return 0;
 	}
@@ -878,25 +878,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
-		goto err_setup;
+		goto clear_reset;
 	}
 
-	temp_tx_ring = kcalloc(adapter->num_tx_queues,
-			       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 	if (!temp_tx_ring) {
 		err = -ENOMEM;
-		goto err_setup;
+		goto clear_reset;
 	}
 
 	if (new_tx_count != adapter->tx_ring_count) {
-		memcpy(temp_tx_ring, adapter->tx_ring,
-		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+			       sizeof(struct ixgbe_ring));
 			temp_tx_ring[i].count = new_tx_count;
 			err = ixgbe_setup_tx_resources(adapter,
 						       &temp_tx_ring[i]);
@@ -904,28 +903,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 				while (i) {
 					i--;
 					ixgbe_free_tx_resources(adapter,
 								&temp_tx_ring[i]);
 				}
-				goto err_setup;
+				goto clear_reset;
 			}
 		}
 		need_update = true;
 	}
 
-	temp_rx_ring = kcalloc(adapter->num_rx_queues,
-			       sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if ((!temp_rx_ring) && (need_update)) {
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
-		kfree(temp_tx_ring);
+	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+	if (!temp_rx_ring) {
 		err = -ENOMEM;
 		goto err_setup;
 	}
 
 	if (new_rx_count != adapter->rx_ring_count) {
-		memcpy(temp_rx_ring, adapter->rx_ring,
-		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+			       sizeof(struct ixgbe_ring));
 			temp_rx_ring[i].count = new_rx_count;
 			err = ixgbe_setup_rx_resources(adapter,
 						       &temp_rx_ring[i]);
@@ -947,22 +942,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
-			kfree(adapter->tx_ring);
-			adapter->tx_ring = temp_tx_ring;
-			temp_tx_ring = NULL;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				ixgbe_free_tx_resources(adapter,
+							adapter->tx_ring[i]);
+				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+				       sizeof(struct ixgbe_ring));
+			}
 			adapter->tx_ring_count = new_tx_count;
 		}
 
 		/* rx */
 		if (new_rx_count != adapter->rx_ring_count) {
-			kfree(adapter->rx_ring);
-			adapter->rx_ring = temp_rx_ring;
-			temp_rx_ring = NULL;
+			for (i = 0; i < adapter->num_rx_queues; i++) {
+				ixgbe_free_rx_resources(adapter,
+							adapter->rx_ring[i]);
+				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+				       sizeof(struct ixgbe_ring));
+			}
 			adapter->rx_ring_count = new_rx_count;
 		}
 
 		ixgbe_up(adapter);
 	}
 
+	vfree(temp_rx_ring);
 err_setup:
+	vfree(temp_tx_ring);
+clear_reset:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
 }
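
A design note on the hunk above: each ring is now an individually allocated object whose address other code holds onto, so the resize path can no longer free the old ring array and swap in a new pointer. Instead it stages the resized rings in a temporary array and copies them over the live structures in place. A minimal sketch of that pattern, with setup_ring()/free_ring() as hypothetical stand-ins for the driver's resource routines:

/* Hypothetical sketch of copy-in-place resizing: ring pointers held
 * elsewhere stay valid because only the pointed-to contents change. */
struct ring { void *desc; int count; };
static int setup_ring(struct ring *r);	/* allocate r->desc for r->count */
static void free_ring(struct ring *r);	/* release r->desc */

static int resize_rings(struct ring **rings, int n, int new_count,
			struct ring *temp)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		temp[i] = *rings[i];		/* clone current ring state */
		temp[i].count = new_count;
		err = setup_ring(&temp[i]);	/* new descriptor memory */
		if (err)
			goto undo;
	}
	for (i = 0; i < n; i++) {
		free_ring(rings[i]);		/* drop old descriptor memory */
		*rings[i] = temp[i];		/* rings[i] itself never moves */
	}
	return 0;
undo:
	while (i--)
		free_ring(&temp[i]);
	return err;
}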
@@ -1007,13 +1012,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+		queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+		queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
@@ -1627,7 +1632,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	reg_data |= IXGBE_RXDCTL_ENABLE;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		int j = adapter->rx_ring[0].reg_idx;
+		int j = adapter->rx_ring[0]->reg_idx;
 		u32 k;
 		for (k = 0; k < 10; k++) {
 			if (IXGBE_READ_REG(&adapter->hw,
@@ -2011,7 +2016,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
 
 	/* only valid if in constant ITR mode */
 	switch (adapter->rx_itr_setting) {
@@ -2064,7 +2069,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 
 	if (ec->tx_max_coalesced_frames_irq)
-		adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
 		/* check the limits */
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
 			fcoe_i = f->mask + i % f->indices;
 			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
-			fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	} else {
 		/* Use single rx queue for FCoE */
 		fcoe_i = f->mask;
-		fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
 				IXGBE_ETQS_QUEUE_EN |
(The remainder of the diff, including the ring-allocation changes themselves, is collapsed in the original view.)