Commit ad974bae authored by Netanel Belgazal, committed by David S. Miller

net: ena: add support for out of order rx buffers refill

The ENA driver posts Rx buffers through the Rx submission queue
for the ENA device to fill with received packets.
Each Rx buffer is marked with a req_id in the Rx descriptor.

Newer ENA devices can consume the posted Rx buffers out of order,
and as a result the corresponding Rx completion queue will contain
Rx completion descriptors with non-contiguous req_id(s).

With this change the driver holds two rings.
The first ring (called free_rx_ids) is a mapping ring.
It holds all the unused request ids.
The values in this ring range from 0 to ring_size - 1.

When the driver wants to allocate a new Rx buffer, it takes the value
at the head of free_rx_ids and uses it as the index into the
rx_buffer_info ring. The req_id is also written to the Rx descriptor.

Upon Rx completion,
the driver takes the req_id from the completion descriptor and uses it
as the index into rx_buffer_info.
The req_id is then returned to the free_rx_ids ring.

This patch also adds statistics to report when the driver receives an
out-of-range or unused req_id.

Note:
free_rx_ids is only accessed from the napi handler, so no locking is
required.
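
For illustration, the scheme reduces to the following minimal, self-contained
sketch (plain C, not the driver code itself; RING_SIZE and the function names
are invented for the example, while the driver works on rx_ring->ring_size
with its next_to_use/next_to_clean indices):

#include <stdint.h>

#define RING_SIZE 16

static uint16_t free_rx_ids[RING_SIZE]; /* pool of unused request ids */
static uint16_t next_to_use;            /* head: ids taken when posting buffers */
static uint16_t next_to_clean;          /* tail: ids returned on completion */

static void init_free_ids(void)
{
        uint16_t i;

        /* Initially every req_id from 0 to RING_SIZE - 1 is unused. */
        for (i = 0; i < RING_SIZE; i++)
                free_rx_ids[i] = i;
}

/* Posting a buffer: take the id at the head; it indexes rx_buffer_info
 * and is written into the Rx descriptor.
 */
static uint16_t alloc_req_id(void)
{
        uint16_t req_id = free_rx_ids[next_to_use];

        next_to_use = (next_to_use + 1) % RING_SIZE;
        return req_id;
}

/* Completion: the device may hand ids back in any order; each one is
 * simply written back at the tail, so no ordering is assumed.
 */
static void free_req_id(uint16_t req_id)
{
        free_rx_ids[next_to_clean] = req_id;
        next_to_clean = (next_to_clean + 1) % RING_SIZE;
}

Because each id is either in flight or in the pool, the head/tail bookkeeping
never overflows, and a single reader/writer context (the napi handler) needs
no locking.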
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e2eed0e3
@@ -493,6 +493,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 	if (cdesc_phase != expected_phase)
 		return -EAGAIN;
 
+	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+		pr_err("Invalid req id %d\n", cdesc->req_id);
+		return -EINVAL;
+	}
+
 	ena_com_cq_inc_head(io_cq);
 
 	*req_id = READ_ONCE(cdesc->req_id);
...
@@ -93,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(dma_mapping_err),
 	ENA_STAT_RX_ENTRY(bad_desc_num),
 	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+	ENA_STAT_RX_ENTRY(bad_req_id),
 	ENA_STAT_RX_ENTRY(empty_rx_ring),
 };
...
@@ -304,6 +304,24 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
 		ena_free_tx_resources(adapter, i);
 }
 
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+{
+	if (likely(req_id < rx_ring->ring_size))
+		return 0;
+
+	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+		  "Invalid rx req_id: %hu\n", req_id);
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->rx_stats.bad_req_id++;
+	u64_stats_update_end(&rx_ring->syncp);
+
+	/* Trigger device reset */
+	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
+	return -EFAULT;
+}
+
 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
  * @adapter: network interface device structure
  * @qid: queue index
@@ -315,7 +333,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
 {
 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
 	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
-	int size, node;
+	int size, node, i;
 
 	if (rx_ring->rx_buffer_info) {
 		netif_err(adapter, ifup, adapter->netdev,
@@ -336,6 +354,20 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
 		return -ENOMEM;
 	}
 
+	size = sizeof(u16) * rx_ring->ring_size;
+	rx_ring->free_rx_ids = vzalloc_node(size, node);
+	if (!rx_ring->free_rx_ids) {
+		rx_ring->free_rx_ids = vzalloc(size);
+		if (!rx_ring->free_rx_ids) {
+			vfree(rx_ring->rx_buffer_info);
+			return -ENOMEM;
+		}
+	}
+
+	/* Req id ring for receiving RX pkts out of order */
+	for (i = 0; i < rx_ring->ring_size; i++)
+		rx_ring->free_rx_ids[i] = i;
+
 	/* Reset rx statistics */
 	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
@@ -359,6 +391,9 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
+
+	vfree(rx_ring->free_rx_ids);
+	rx_ring->free_rx_ids = NULL;
 }
 
 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
@@ -464,15 +499,22 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
 
 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 {
-	u16 next_to_use;
+	u16 next_to_use, req_id;
 	u32 i;
 	int rc;
 
 	next_to_use = rx_ring->next_to_use;
 
 	for (i = 0; i < num; i++) {
-		struct ena_rx_buffer *rx_info =
-			&rx_ring->rx_buffer_info[next_to_use];
+		struct ena_rx_buffer *rx_info;
+
+		req_id = rx_ring->free_rx_ids[next_to_use];
+		rc = validate_rx_req_id(rx_ring, req_id);
+		if (unlikely(rc < 0))
+			break;
+
+		rx_info = &rx_ring->rx_buffer_info[req_id];
 
 		rc = ena_alloc_rx_page(rx_ring, rx_info,
 				       __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
@@ -484,7 +526,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 		}
 
 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
 						&rx_info->ena_buf,
-						next_to_use);
+						req_id);
 		if (unlikely(rc)) {
 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
 				   "failed to add buffer for rx queue %d\n",
@@ -789,13 +831,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 				  u16 *next_to_clean)
 {
 	struct sk_buff *skb;
-	struct ena_rx_buffer *rx_info =
-		&rx_ring->rx_buffer_info[*next_to_clean];
-	u32 len;
-	u32 buf = 0;
+	struct ena_rx_buffer *rx_info;
+	u16 len, req_id, buf = 0;
 	void *va;
 
-	len = ena_bufs[0].len;
+	len = ena_bufs[buf].len;
+	req_id = ena_bufs[buf].req_id;
+	rx_info = &rx_ring->rx_buffer_info[req_id];
 
 	if (unlikely(!rx_info->page)) {
 		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
 			  "Page is NULL\n");
@@ -867,13 +910,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 			  skb->len, skb->data_len);
 
 		rx_info->page = NULL;
+
+		rx_ring->free_rx_ids[*next_to_clean] = req_id;
 		*next_to_clean =
 			ENA_RX_RING_IDX_NEXT(*next_to_clean,
 					     rx_ring->ring_size);
 		if (likely(--descs == 0))
 			break;
-		rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
-		len = ena_bufs[++buf].len;
+
+		buf++;
+		len = ena_bufs[buf].len;
+		req_id = ena_bufs[buf].req_id;
+		rx_info = &rx_ring->rx_buffer_info[req_id];
 	} while (1);
 
 	return skb;
@@ -974,6 +1022,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	int rc = 0;
 	int total_len = 0;
 	int rx_copybreak_pkt = 0;
+	int i;
 
 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 		  "%s qid %d\n", __func__, rx_ring->qid);
@@ -1003,9 +1052,13 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		/* exit if we failed to retrieve a buffer */
 		if (unlikely(!skb)) {
-			next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
-							    ena_rx_ctx.descs,
-							    rx_ring->ring_size);
+			for (i = 0; i < ena_rx_ctx.descs; i++) {
+				rx_ring->free_rx_ids[next_to_clean] =
+					rx_ring->ena_bufs[i].req_id;
+				next_to_clean =
+					ENA_RX_RING_IDX_NEXT(next_to_clean,
+							     rx_ring->ring_size);
+			}
 			break;
 		}
...
@@ -194,12 +194,19 @@ struct ena_stats_rx {
 	u64 dma_mapping_err;
 	u64 bad_desc_num;
 	u64 rx_copybreak_pkt;
+	u64 bad_req_id;
 	u64 empty_rx_ring;
 };
 
 struct ena_ring {
-	/* Holds the empty requests for TX out of order completions */
-	u16 *free_tx_ids;
+	union {
+		/* Holds the empty requests for TX/RX
+		 * out of order completions
+		 */
+		u16 *free_tx_ids;
+		u16 *free_rx_ids;
+	};
+
 	union {
 		struct ena_tx_buffer *tx_buffer_info;
 		struct ena_rx_buffer *rx_buffer_info;
...