Commit 911a8c96 authored by David Arinzon, committed by Jakub Kicinski

net: ena: Use tx_ring instead of xdp_ring for XDP channel TX

When an XDP program is loaded, the driver's existing channels are split
into two halves:
- The first half of the channels contain RX and TX rings. These queues
  are used for receiving traffic and sending packets originating from
  the kernel.
- The second half of the channels contain only a TX ring. These queues
  are used for sending packets that were redirected using XDP_TX
  or XDP_REDIRECT.

Referring to the queues in the second half of the channels as "xdp_ring"
can be confusing and may give the impression that ENA is capable of
creating an additional, special kind of queue.

This patch ensures that the xdp_ring field is used exclusively to
describe the XDP TX queue that a specific RX queue utilizes when
forwarding packets with XDP_TX and XDP_REDIRECT, preserving the
integrity of the xdp_ring field in ena_ring.
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David Arinzon <darinzon@amazon.com>
Link: https://lore.kernel.org/r/20240101190855.18739-6-darinzon@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 23ec9749
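For context on the split described above: the driver keeps both halves in a
single tx_ring array, so a queue index alone determines which kind of channel
it names. Below is a minimal, self-contained sketch of that indexing scheme;
the struct and helper are illustrative stand-ins (not the driver's
definitions), and the range check mirrors what the driver's
ENA_IS_XDP_INDEX() macro tests.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant ena_adapter fields. */
struct adapter_layout {
	int num_io_queues;   /* regular RX/TX channels                 */
	int xdp_first_ring;  /* index of the first XDP-TX-only channel */
	int xdp_num_queues;  /* number of XDP-TX-only channels         */
};

/* Mirrors the ENA_IS_XDP_INDEX(adapter, i) check: XDP TX channels
 * occupy [xdp_first_ring, xdp_first_ring + xdp_num_queues).
 */
static bool is_xdp_index(const struct adapter_layout *a, int i)
{
	return i >= a->xdp_first_ring &&
	       i < a->xdp_first_ring + a->xdp_num_queues;
}

int main(void)
{
	/* Eight channels, split in half once an XDP program is attached. */
	struct adapter_layout a = { .num_io_queues = 4,
				    .xdp_first_ring = 4,
				    .xdp_num_queues = 4 };

	for (int i = 0; i < 8; i++)
		printf("queue %d: %s\n", i,
		       is_xdp_index(&a, i) ? "XDP TX only" : "RX/TX");
	return 0;
}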
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1746,8 +1746,8 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
 	for (i = first_index; i < first_index + count; i++) {
 		netif_napi_del(&adapter->ena_napi[i].napi);
 
-		WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
-			adapter->ena_napi[i].xdp_ring);
+		WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+			adapter->ena_napi[i].rx_ring);
 	}
 }
@@ -1762,12 +1762,10 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
 		netif_napi_add(adapter->netdev, &napi->napi,
 			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
 
-		if (!ENA_IS_XDP_INDEX(adapter, i)) {
+		if (!ENA_IS_XDP_INDEX(adapter, i))
 			napi->rx_ring = &adapter->rx_ring[i];
-			napi->tx_ring = &adapter->tx_ring[i];
-		} else {
-			napi->xdp_ring = &adapter->tx_ring[i];
-		}
+
+		napi->tx_ring = &adapter->tx_ring[i];
 		napi->qid = i;
 	}
 }
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -125,7 +125,6 @@ struct ena_napi {
 	struct napi_struct napi;
 	struct ena_ring *tx_ring;
 	struct ena_ring *rx_ring;
-	struct ena_ring *xdp_ring;
 	u32 qid;
 	struct dim dim;
 };
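For reference, the resulting structure after this hunk, reconstructed from
the lines above (the field comments are editorial, not from the source):

struct ena_napi {
	struct napi_struct napi;
	struct ena_ring *tx_ring; /* regular TX ring, or the XDP TX ring on
				   * channels in the second half
				   */
	struct ena_ring *rx_ring; /* set only on non-XDP channels */
	u32 qid;
	struct dim dim;
};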
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -5,23 +5,23 @@
 #include "ena_xdp.h"
 
-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
+static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
 	struct ena_tx_buffer *tx_info;
 
-	tx_info = &xdp_ring->tx_buffer_info[req_id];
+	tx_info = &tx_ring->tx_buffer_info[req_id];
 	if (likely(tx_info->xdpf))
 		return 0;
 
-	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
+	return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
 }
 
-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
 				struct ena_tx_buffer *tx_info,
 				struct xdp_frame *xdpf,
 				struct ena_com_tx_ctx *ena_tx_ctx)
 {
-	struct ena_adapter *adapter = xdp_ring->adapter;
+	struct ena_adapter *adapter = tx_ring->adapter;
 	struct ena_com_buf *ena_buf;
 	int push_len = 0;
 	dma_addr_t dma;
@@ -32,9 +32,9 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
 	data = tx_info->xdpf->data;
 	size = tx_info->xdpf->len;
 
-	if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		/* Designate part of the packet for LLQ */
-		push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+		push_len = min_t(u32, size, tx_ring->tx_max_header_size);
 
 		ena_tx_ctx->push_header = data;
@@ -45,11 +45,11 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
 	ena_tx_ctx->header_len = push_len;
 
 	if (size > 0) {
-		dma = dma_map_single(xdp_ring->dev,
+		dma = dma_map_single(tx_ring->dev,
 				     data,
 				     size,
 				     DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
 			goto error_report_dma_error;
 
 		tx_info->map_linear_data = 0;
@@ -65,14 +65,14 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
 	return 0;
 
 error_report_dma_error:
-	ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
-			  &xdp_ring->syncp);
+	ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+			  &tx_ring->syncp);
 	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
 
 	return -EINVAL;
 }
 
-int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
 		       struct ena_adapter *adapter,
 		       struct xdp_frame *xdpf,
 		       int flags)
@@ -82,19 +82,19 @@ int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
 	u16 next_to_use, req_id;
 	int rc;
 
-	next_to_use = xdp_ring->next_to_use;
-	req_id = xdp_ring->free_ids[next_to_use];
-	tx_info = &xdp_ring->tx_buffer_info[req_id];
+	next_to_use = tx_ring->next_to_use;
+	req_id = tx_ring->free_ids[next_to_use];
+	tx_info = &tx_ring->tx_buffer_info[req_id];
 	tx_info->num_of_bufs = 0;
 
-	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
+	rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
 	if (unlikely(rc))
 		return rc;
 
 	ena_tx_ctx.req_id = req_id;
 
 	rc = ena_xmit_common(adapter,
-			     xdp_ring,
+			     tx_ring,
 			     tx_info,
 			     &ena_tx_ctx,
 			     next_to_use,
@@ -106,12 +106,12 @@ int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
 	 * calls a memory barrier inside it.
 	 */
 	if (flags & XDP_XMIT_FLUSH)
-		ena_ring_tx_doorbell(xdp_ring);
+		ena_ring_tx_doorbell(tx_ring);
 
 	return rc;
 
 error_unmap_dma:
-	ena_unmap_tx_buff(xdp_ring, tx_info);
+	ena_unmap_tx_buff(tx_ring, tx_info);
 	tx_info->xdpf = NULL;
 	return rc;
 }
@@ -120,7 +120,7 @@ int ena_xdp_xmit(struct net_device *dev, int n,
 		 struct xdp_frame **frames, u32 flags)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);
-	struct ena_ring *xdp_ring;
+	struct ena_ring *tx_ring;
 	int qid, i, nxmit = 0;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -135,22 +135,22 @@ int ena_xdp_xmit(struct net_device *dev, int n,
 	qid = smp_processor_id() % adapter->xdp_num_queues;
 	qid += adapter->xdp_first_ring;
-	xdp_ring = &adapter->tx_ring[qid];
+	tx_ring = &adapter->tx_ring[qid];
 
 	/* Other CPU ids might try to send thorugh this queue */
-	spin_lock(&xdp_ring->xdp_tx_lock);
+	spin_lock(&tx_ring->xdp_tx_lock);
 
 	for (i = 0; i < n; i++) {
-		if (ena_xdp_xmit_frame(xdp_ring, adapter, frames[i], 0))
+		if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
 			break;
 		nxmit++;
 	}
 
 	/* Ring doorbell to make device aware of the packets */
 	if (flags & XDP_XMIT_FLUSH)
-		ena_ring_tx_doorbell(xdp_ring);
+		ena_ring_tx_doorbell(tx_ring);
 
-	spin_unlock(&xdp_ring->xdp_tx_lock);
+	spin_unlock(&tx_ring->xdp_tx_lock);
 
 	/* Return number of packets sent */
 	return nxmit;
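The qid computation and locking in this hunk implement a simple
many-producer scheme: any CPU may enter ndo_xdp_xmit, CPUs are hashed onto
the XDP TX queues by processor id, and the per-ring xdp_tx_lock serializes
CPUs that land on the same queue. A reduced sketch of that dispatch pattern
follows; adapter, ring, frame, and xmit_one are hypothetical stand-ins, not
driver symbols.

/* Sketch only: several CPUs can map to the same ring whenever there are
 * more CPUs than XDP TX queues, hence the per-ring spinlock.
 */
static int xdp_xmit_sketch(struct adapter *ad, int n, struct frame **frames)
{
	int qid = smp_processor_id() % ad->xdp_num_queues + ad->xdp_first_ring;
	struct ring *ring = &ad->tx_ring[qid];
	int nxmit = 0;

	spin_lock(&ring->xdp_tx_lock);  /* other CPUs may pick this ring too */
	while (nxmit < n && xmit_one(ring, frames[nxmit]) == 0)
		nxmit++;
	spin_unlock(&ring->xdp_tx_lock);

	return nxmit; /* frames not sent remain the caller's to free */
}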
@@ -355,7 +355,7 @@ int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
 	return 0;
 }
 
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
+static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
 {
 	u32 total_done = 0;
 	u16 next_to_clean;
@@ -363,55 +363,54 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 	u16 req_id;
 	int rc;
 
-	if (unlikely(!xdp_ring))
+	if (unlikely(!tx_ring))
 		return 0;
-	next_to_clean = xdp_ring->next_to_clean;
+	next_to_clean = tx_ring->next_to_clean;
 
 	while (tx_pkts < budget) {
 		struct ena_tx_buffer *tx_info;
 		struct xdp_frame *xdpf;
 
-		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
+		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
 						&req_id);
 		if (rc) {
 			if (unlikely(rc == -EINVAL))
-				handle_invalid_req_id(xdp_ring, req_id, NULL,
-						      true);
+				handle_invalid_req_id(tx_ring, req_id, NULL, true);
 			break;
 		}
 
 		/* validate that the request id points to a valid xdp_frame */
-		rc = validate_xdp_req_id(xdp_ring, req_id);
+		rc = validate_xdp_req_id(tx_ring, req_id);
 		if (rc)
 			break;
 
-		tx_info = &xdp_ring->tx_buffer_info[req_id];
+		tx_info = &tx_ring->tx_buffer_info[req_id];
 		xdpf = tx_info->xdpf;
 
 		tx_info->xdpf = NULL;
 		tx_info->last_jiffies = 0;
-		ena_unmap_tx_buff(xdp_ring, tx_info);
+		ena_unmap_tx_buff(tx_ring, tx_info);
 
-		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
-			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
+		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
 			  xdpf);
 
 		tx_pkts++;
 		total_done += tx_info->tx_descs;
 
 		xdp_return_frame(xdpf);
-		xdp_ring->free_ids[next_to_clean] = req_id;
+		tx_ring->free_ids[next_to_clean] = req_id;
 		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
-						     xdp_ring->ring_size);
+						     tx_ring->ring_size);
 	}
 
-	xdp_ring->next_to_clean = next_to_clean;
-	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
-	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
+	tx_ring->next_to_clean = next_to_clean;
+	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
 
-	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
 		  "tx_poll: q %d done. total pkts: %d\n",
-		  xdp_ring->qid, tx_pkts);
+		  tx_ring->qid, tx_pkts);
 
 	return tx_pkts;
 }
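The cleanup loop above follows the usual TX completion pattern: fetch a
completed req_id from the completion queue, validate it, recover the frame
bookkeeping, unmap and return the frame to the XDP layer, then recycle the
id into free_ids. A condensed view of the loop body after this patch (the
helper names are the driver's; the control flow is simplified and error
handling is elided):

/* Condensed from ena_clean_xdp_irq(); not the literal function body. */
while (tx_pkts < budget) {
	if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id))
		break;                        /* no completion available  */
	if (validate_xdp_req_id(tx_ring, req_id))
		break;                        /* id does not name a frame */

	tx_info = &tx_ring->tx_buffer_info[req_id];
	xdpf = tx_info->xdpf;
	tx_info->xdpf = NULL;
	ena_unmap_tx_buff(tx_ring, tx_info);  /* release the DMA mapping  */
	xdp_return_frame(xdpf);               /* hand frame back to XDP   */

	tx_ring->free_ids[next_to_clean] = req_id;  /* recycle the req_id */
	next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
					     tx_ring->ring_size);
	tx_pkts++;
}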
@@ -423,43 +422,43 @@ int ena_xdp_io_poll(struct napi_struct *napi, int budget)
 {
 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
 	u32 xdp_work_done, xdp_budget;
-	struct ena_ring *xdp_ring;
+	struct ena_ring *tx_ring;
 	int napi_comp_call = 0;
 	int ret;
 
-	xdp_ring = ena_napi->xdp_ring;
+	tx_ring = ena_napi->tx_ring;
 	xdp_budget = budget;
 
-	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
-	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
+	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
 		napi_complete_done(napi, 0);
 		return 0;
 	}
 
-	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
+	xdp_work_done = ena_clean_xdp_irq(tx_ring, xdp_budget);
 
 	/* If the device is about to reset or down, avoid unmask
 	 * the interrupt and return 0 so NAPI won't reschedule
 	 */
-	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
+	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
 		napi_complete_done(napi, 0);
 		ret = 0;
 	} else if (xdp_budget > xdp_work_done) {
 		napi_comp_call = 1;
 		if (napi_complete_done(napi, xdp_work_done))
-			ena_unmask_interrupt(xdp_ring, NULL);
-		ena_update_ring_numa_node(xdp_ring, NULL);
+			ena_unmask_interrupt(tx_ring, NULL);
+		ena_update_ring_numa_node(tx_ring, NULL);
 		ret = xdp_work_done;
 	} else {
 		ret = xdp_budget;
 	}
 
-	u64_stats_update_begin(&xdp_ring->syncp);
-	xdp_ring->tx_stats.napi_comp += napi_comp_call;
-	xdp_ring->tx_stats.tx_poll++;
-	u64_stats_update_end(&xdp_ring->syncp);
-	xdp_ring->tx_stats.last_napi_jiffies = jiffies;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->tx_stats.napi_comp += napi_comp_call;
+	tx_ring->tx_stats.tx_poll++;
+	u64_stats_update_end(&tx_ring->syncp);
+	tx_ring->tx_stats.last_napi_jiffies = jiffies;
 
 	return ret;
 }
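ena_xdp_io_poll() keeps to the standard NAPI contract: when the queue was
cleaned below budget, the poll completes and the interrupt is re-armed; when
the whole budget was consumed, the full budget is returned so NAPI keeps
polling. A generic skeleton of that contract is sketched below; clean_tx_irq,
ring, and unmask_interrupt are illustrative helpers, not ENA symbols.

/* Generic NAPI poll shape matching the structure of the hunk above. */
static int poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = clean_tx_irq(ring, budget);  /* hypothetical cleaner */

	if (work_done < budget) {
		/* All pending work handled: stop polling, re-enable IRQ. */
		if (napi_complete_done(napi, work_done))
			unmask_interrupt(ring);      /* hypothetical helper */
		return work_done;
	}

	/* Budget exhausted: stay in polling mode. */
	return budget;
}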
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h
@@ -35,7 +35,7 @@ void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
 					  struct bpf_prog *prog,
 					  int first, int count);
 int ena_xdp_io_poll(struct napi_struct *napi, int budget);
-int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
 		       struct ena_adapter *adapter,
 		       struct xdp_frame *xdpf,
 		       int flags);