Commit a8ee104f authored by David Arinzon, committed by David S. Miller

net: ena: Update NUMA TPH hint register upon NUMA node update

The device supports a PCIe optimization hint, which indicates the
NUMA node on which the queue is currently being processed. The PCIe
subsystem uses this hint to reduce access time by targeting the
resources of the correct NUMA node and maintaining cache coherence.

The driver updates the register holding this hint (called TPH -
TLP Processing Hint) during the NAPI loop.

Although the update is only needed upon a NUMA node change (when a
queue moves from one node to another), the current logic performs the
register update whenever the queue moves to a different CPU, even
though that CPU is not necessarily on a different NUMA node.
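
For illustration, on a typical multi-socket system many CPUs map to the
same NUMA node, so a CPU switch usually keeps the queue on the same node.
A hypothetical user-space sketch (not part of the driver, assuming libnuma
is installed) that prints this mapping:

/*
 * Hypothetical demo, not part of the driver: print the NUMA node of each
 * configured CPU to show that many CPUs share a node.
 * Build with: gcc cpu_to_node_demo.c -o cpu_to_node_demo -lnuma
 */
#include <stdio.h>
#include <numa.h>

int main(void)
{
	int cpu, ncpus;

	if (numa_available() < 0) {
		fprintf(stderr, "NUMA is not available on this system\n");
		return 1;
	}

	ncpus = numa_num_configured_cpus();
	for (cpu = 0; cpu < ncpus; cpu++)
		printf("cpu %d -> node %d\n", cpu, numa_node_of_cpu(cpu));

	return 0;
}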

The changes include:
1. Performing the TPH update only when the queue has switched to a
different NUMA node (a condensed sketch of the resulting check
follows this list).
2. Triggering the TPH update only when NAPI was scheduled from
interrupt context, as opposed to a busy-polling loop. During
busy-polling, the frequency of CPU switches for a particular queue
is significantly higher, and so is the likelihood of switching
NUMA nodes. Frequent device updates upon such NUMA changes are
therefore unlikely to be beneficial.
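
A minimal sketch of the resulting per-queue check, condensed from the
ena_update_ring_numa_node() hunk further down (single ring shown for
brevity; update_numa_hint is a hypothetical name, not the driver's):

/* Condensed sketch, not the exact driver code: remember the last NUMA
 * node per ring and touch the TPH register only when the node actually
 * changes, not on every CPU migration.
 */
static void update_numa_hint(struct ena_ring *ring)
{
	int cpu = get_cpu();
	int node;

	/* Same CPU as last time: nothing to do */
	if (likely(ring->cpu == cpu))
		goto out;

	ring->cpu = cpu;
	node = cpu_to_node(cpu);

	/* CPU changed but the NUMA node did not: skip the device update */
	if (likely(ring->numa_node == node))
		goto out;

	put_cpu();

	if (node != NUMA_NO_NODE) {
		/* Only now write the TPH hint register */
		ena_com_update_numa_node(ring->ena_com_io_cq, node);
		ring->numa_node = node;
	}
	return;

out:
	put_cpu();
}

Change 2 then moves the call to this check inside the branch of
ena_io_poll() that re-arms interrupts, so it is skipped on busy-poll
iterations.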

Fixes: 1738cd3e ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
Signed-off-by: David Arinzon <darinzon@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e712f3e4
@@ -680,6 +680,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
 	ring->ena_dev = adapter->ena_dev;
 	ring->per_napi_packets = 0;
 	ring->cpu = 0;
+	ring->numa_node = 0;
 	ring->no_interrupt_event_cnt = 0;
 	u64_stats_init(&ring->syncp);
 }
@@ -783,6 +784,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	tx_ring->cpu = ena_irq->cpu;
+	tx_ring->numa_node = node;
 	return 0;
 
 err_push_buf_intermediate_buf:
@@ -915,6 +917,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	rx_ring->cpu = ena_irq->cpu;
+	rx_ring->numa_node = node;
 
 	return 0;
 }
@@ -1863,20 +1866,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 	if (likely(tx_ring->cpu == cpu))
 		goto out;
 
+	tx_ring->cpu = cpu;
+	if (rx_ring)
+		rx_ring->cpu = cpu;
+
 	numa_node = cpu_to_node(cpu);
+
+	if (likely(tx_ring->numa_node == numa_node))
+		goto out;
+
 	put_cpu();
 
 	if (numa_node != NUMA_NO_NODE) {
 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
-		if (rx_ring)
+		tx_ring->numa_node = numa_node;
+		if (rx_ring) {
+			rx_ring->numa_node = numa_node;
 			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
 						 numa_node);
+		}
 	}
 
-	tx_ring->cpu = cpu;
-	if (rx_ring)
-		rx_ring->cpu = cpu;
-
 	return;
 
 out:
 	put_cpu();
@@ -1997,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 				ena_adjust_adaptive_rx_intr_moderation(ena_napi);
 
+			ena_update_ring_numa_node(tx_ring, rx_ring);
 			ena_unmask_interrupt(tx_ring, rx_ring);
 		}
 
-		ena_update_ring_numa_node(tx_ring, rx_ring);
-
 		ret = rx_work_done;
 	} else {
 		ret = budget;
@@ -2386,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
 	ctx.msix_vector = msix_vector;
 	ctx.queue_size = tx_ring->ring_size;
-	ctx.numa_node = cpu_to_node(tx_ring->cpu);
+	ctx.numa_node = tx_ring->numa_node;
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
@@ -2454,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	ctx.msix_vector = msix_vector;
 	ctx.queue_size = rx_ring->ring_size;
-	ctx.numa_node = cpu_to_node(rx_ring->cpu);
+	ctx.numa_node = rx_ring->numa_node;
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
@@ -262,9 +262,11 @@ struct ena_ring {
 	bool disable_meta_caching;
 	u16 no_interrupt_event_cnt;
 
-	/* cpu for TPH */
+	/* cpu and NUMA for TPH */
 	int cpu;
-	/* number of tx/rx_buffer_info's entries */
+	int numa_node;
+
+	/* number of tx/rx_buffer_info's entries */
 	int ring_size;
 	enum ena_admin_placement_policy_type tx_mem_queue_type;