Commit 46f4c29d authored by Igor Russkikh, committed by David S. Miller

net: aquantia: optimize rx performance by page reuse strategy

We introduce internal aq_rxpage wrapper over regular page
where extra field is tracked: rxpage offset inside of allocated page.

This offset allows to reuse one page for multiple packets.
When needed (for example with large frames processing), allocated
pageorder could be customized. This gives even larger page reuse
efficiency.

page_ref_count is used to track page users. If during rx refill the
underlying page still has other users, we increase pg_off by the rx frame
size; thus the top part of the page is reused.
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7e2698c4
...@@ -40,6 +40,8 @@ ...@@ -40,6 +40,8 @@
#define AQ_CFG_RX_HDR_SIZE 256U #define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
/* LRO */ /* LRO */
#define AQ_CFG_IS_LRO_DEF 1U #define AQ_CFG_IS_LRO_DEF 1U
......
...@@ -73,6 +73,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self) ...@@ -73,6 +73,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->tx_itr = aq_itr_tx; cfg->tx_itr = aq_itr_tx;
cfg->rx_itr = aq_itr_rx; cfg->rx_itr = aq_itr_rx;
cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF; cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
......
...@@ -31,6 +31,7 @@ struct aq_nic_cfg_s { ...@@ -31,6 +31,7 @@ struct aq_nic_cfg_s {
u32 itr; u32 itr;
u16 rx_itr; u16 rx_itr;
u16 tx_itr; u16 tx_itr;
u32 rxpageorder;
u32 num_rss_queues; u32 num_rss_queues;
u32 mtu; u32 mtu;
u32 flow_control; u32 flow_control;
......
...@@ -12,10 +12,89 @@ ...@@ -12,10 +12,89 @@
#include "aq_ring.h" #include "aq_ring.h"
#include "aq_nic.h" #include "aq_nic.h"
#include "aq_hw.h" #include "aq_hw.h"
#include "aq_hw_utils.h"
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
/* Unmap and release an rx page wrapper.
 * Undoes the DMA mapping, drops the reference held for the page being
 * in the ring, and clears the page pointer so the slot reads as empty.
 */
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	dma_unmap_page(dev, rxpage->daddr, PAGE_SIZE << rxpage->order,
		       DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}
/* Allocate a page of the given order and DMA-map it for rx use.
 * On success fills in @rxpage (offset reset to 0) and returns 0;
 * returns -ENOMEM on allocation or mapping failure.
 */
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page = dev_alloc_pages(order);
	dma_addr_t daddr;

	if (unlikely(!page))
		return -ENOMEM;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, daddr))) {
		__free_pages(page, order);
		return -ENOMEM;
	}

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;
}
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
int order)
{
int ret;
if (rxbuf->rxdata.page) {
/* One means ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
(PAGE_SIZE << order)) {
self->stats.rx.pg_flips++;
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
*/
aq_free_rxpage(&rxbuf->rxdata,
aq_nic_get_dev(self->aq_nic));
self->stats.rx.pg_losts++;
}
} else {
rxbuf->rxdata.pg_off = 0;
self->stats.rx.pg_reuses++;
}
}
if (!rxbuf->rxdata.page) {
ret = aq_get_rxpage(&rxbuf->rxdata, order,
aq_nic_get_dev(self->aq_nic));
return ret;
}
return 0;
}
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic) struct aq_nic_s *aq_nic)
{ {
...@@ -81,6 +160,11 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, ...@@ -81,6 +160,11 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx; self->idx = idx;
self->size = aq_nic_cfg->rxds; self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
if (aq_nic_cfg->rxpageorder > self->page_order)
self->page_order = aq_nic_cfg->rxpageorder;
self = aq_ring_alloc(self, aq_nic); self = aq_ring_alloc(self, aq_nic);
if (!self) { if (!self) {
...@@ -214,10 +298,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ...@@ -214,10 +298,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
unsigned int i = 0U; unsigned int i = 0U;
u16 hdr_len; u16 hdr_len;
if (buff->is_error) { if (buff->is_error)
__free_pages(buff->page, 0);
continue; continue;
}
if (buff->is_cleaned) if (buff->is_cleaned)
continue; continue;
...@@ -247,16 +329,22 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ...@@ -247,16 +329,22 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
} }
} }
dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
buff->rxdata.daddr,
buff->rxdata.pg_off,
buff->len, DMA_FROM_DEVICE);
/* for single fragment packets use build_skb() */ /* for single fragment packets use build_skb() */
if (buff->is_eop && if (buff->is_eop &&
buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
skb = build_skb(page_address(buff->page), skb = build_skb(aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_FRAME_MAX); AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) { if (unlikely(!skb)) {
err = -ENOMEM; err = -ENOMEM;
goto err_exit; goto err_exit;
} }
skb_put(skb, buff->len); skb_put(skb, buff->len);
page_ref_inc(buff->rxdata.page);
} else { } else {
skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE); skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
if (unlikely(!skb)) { if (unlikely(!skb)) {
...@@ -266,34 +354,41 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ...@@ -266,34 +354,41 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
hdr_len = buff->len; hdr_len = buff->len;
if (hdr_len > AQ_CFG_RX_HDR_SIZE) if (hdr_len > AQ_CFG_RX_HDR_SIZE)
hdr_len = eth_get_headlen(page_address(buff->page), hdr_len = eth_get_headlen(aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_HDR_SIZE); AQ_CFG_RX_HDR_SIZE);
memcpy(__skb_put(skb, hdr_len), page_address(buff->page), memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
ALIGN(hdr_len, sizeof(long))); ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) { if (buff->len - hdr_len > 0) {
skb_add_rx_frag(skb, 0, buff->page, skb_add_rx_frag(skb, 0, buff->rxdata.page,
hdr_len, buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len, buff->len - hdr_len,
SKB_TRUESIZE(buff->len - hdr_len)); AQ_CFG_RX_FRAME_MAX);
page_ref_inc(buff->rxdata.page);
} }
if (!buff->is_eop) { if (!buff->is_eop) {
for (i = 1U, next_ = buff->next, buff_ = buff;
buff_ = &self->buff_ring[next_]; i = 1U;
true; next_ = buff_->next, do {
buff_ = &self->buff_ring[next_], ++i) { next_ = buff_->next,
skb_add_rx_frag(skb, i, buff_ = &self->buff_ring[next_];
buff_->page, 0,
dma_sync_single_range_for_cpu(
aq_nic_get_dev(self->aq_nic),
buff_->rxdata.daddr,
buff_->rxdata.pg_off,
buff_->len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, i++,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len, buff_->len,
SKB_TRUESIZE(buff->len - AQ_CFG_RX_FRAME_MAX);
ETH_HLEN)); page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1; buff_->is_cleaned = 1;
} while (!buff_->is_eop);
if (buff_->is_eop)
break;
}
} }
} }
...@@ -319,8 +414,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ...@@ -319,8 +414,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int aq_ring_rx_fill(struct aq_ring_s *self) int aq_ring_rx_fill(struct aq_ring_s *self)
{ {
unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + unsigned int page_order = self->page_order;
(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
struct aq_ring_buff_s *buff = NULL; struct aq_ring_buff_s *buff = NULL;
int err = 0; int err = 0;
int i = 0; int i = 0;
...@@ -332,30 +426,15 @@ int aq_ring_rx_fill(struct aq_ring_s *self) ...@@ -332,30 +426,15 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U; buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX; buff->len = AQ_CFG_RX_FRAME_MAX;
buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order); err = aq_get_rxpages(self, buff, page_order);
if (!buff->page) { if (err)
err = -ENOMEM;
goto err_exit; goto err_exit;
}
buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
buff->page, 0,
AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
err = -ENOMEM;
goto err_exit;
}
buff->pa = aq_buf_daddr(&buff->rxdata);
buff = NULL; buff = NULL;
} }
err_exit: err_exit:
if (err < 0) {
if (buff && buff->page)
__free_pages(buff->page, 0);
}
return err; return err;
} }
...@@ -368,10 +447,7 @@ void aq_ring_rx_deinit(struct aq_ring_s *self) ...@@ -368,10 +447,7 @@ void aq_ring_rx_deinit(struct aq_ring_s *self)
self->sw_head = aq_ring_next_dx(self, self->sw_head)) { self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
__free_pages(buff->page, 0);
} }
err_exit:; err_exit:;
......
...@@ -17,6 +17,13 @@ ...@@ -17,6 +17,13 @@
struct page; struct page;
struct aq_nic_cfg_s; struct aq_nic_cfg_s;
/* Wrapper over a regular page used for rx buffers. Tracks the DMA
 * mapping and the current offset within the (possibly multi-order)
 * page so that a single page can back several received frames.
 */
struct aq_rxpage {
	struct page *page;	/* backing page, NULL when slot is empty */
	dma_addr_t daddr;	/* DMA address of the mapped page */
	unsigned int order;	/* allocation order: size is PAGE_SIZE << order */
	unsigned int pg_off;	/* offset of the current buffer inside the page */
};
/* TxC SOP DX EOP /* TxC SOP DX EOP
* +----------+----------+----------+----------- * +----------+----------+----------+-----------
* 8bytes|len l3,l4 | pa | pa | pa * 8bytes|len l3,l4 | pa | pa | pa
...@@ -31,28 +38,21 @@ struct aq_nic_cfg_s; ...@@ -31,28 +38,21 @@ struct aq_nic_cfg_s;
*/ */
struct __packed aq_ring_buff_s { struct __packed aq_ring_buff_s {
union { union {
/* RX/TX */
dma_addr_t pa;
/* RX */ /* RX */
struct { struct {
u32 rss_hash; u32 rss_hash;
u16 next; u16 next;
u8 is_hash_l4; u8 is_hash_l4;
u8 rsvd1; u8 rsvd1;
struct page *page; struct aq_rxpage rxdata;
}; };
/* EOP */ /* EOP */
struct { struct {
dma_addr_t pa_eop; dma_addr_t pa_eop;
struct sk_buff *skb; struct sk_buff *skb;
}; };
/* DX */
struct {
dma_addr_t pa;
};
/* SOP */
struct {
dma_addr_t pa_sop;
u32 len_pkt_sop;
};
/* TxC */ /* TxC */
struct { struct {
u32 mss; u32 mss;
...@@ -91,6 +91,9 @@ struct aq_ring_stats_rx_s { ...@@ -91,6 +91,9 @@ struct aq_ring_stats_rx_s {
u64 bytes; u64 bytes;
u64 lro_packets; u64 lro_packets;
u64 jumbo_packets; u64 jumbo_packets;
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
}; };
struct aq_ring_stats_tx_s { struct aq_ring_stats_tx_s {
...@@ -116,6 +119,7 @@ struct aq_ring_s { ...@@ -116,6 +119,7 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */ unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */ unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for fater math */ /* stored here for fater math */
unsigned int page_order;
union aq_ring_stats_s stats; union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa; dma_addr_t dx_ring_pa;
}; };
...@@ -126,6 +130,16 @@ struct aq_ring_param_s { ...@@ -126,6 +130,16 @@ struct aq_ring_param_s {
cpumask_t affinity_mask; cpumask_t affinity_mask;
}; };
/* CPU-visible address of the current buffer within the rx page. */
static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
{
	void *base = page_to_virt(rxpage->page);

	return base + rxpage->pg_off;
}
/* DMA address of the current buffer within the rx page. */
static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
{
	dma_addr_t base = rxpage->daddr;

	return base + rxpage->pg_off;
}
static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self, static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
unsigned int dx) unsigned int dx)
{ {
......
...@@ -353,6 +353,9 @@ void aq_vec_add_stats(struct aq_vec_s *self, ...@@ -353,6 +353,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->errors += rx->errors; stats_rx->errors += rx->errors;
stats_rx->jumbo_packets += rx->jumbo_packets; stats_rx->jumbo_packets += rx->jumbo_packets;
stats_rx->lro_packets += rx->lro_packets; stats_rx->lro_packets += rx->lro_packets;
stats_rx->pg_losts += rx->pg_losts;
stats_rx->pg_flips += rx->pg_flips;
stats_rx->pg_reuses += rx->pg_reuses;
stats_tx->packets += tx->packets; stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes; stats_tx->bytes += tx->bytes;
......
...@@ -619,8 +619,6 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, ...@@ -619,8 +619,6 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring) struct aq_ring_s *ring)
{ {
struct device *ndev = aq_nic_get_dev(ring->aq_nic);
for (; ring->hw_head != ring->sw_tail; for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL; struct aq_ring_buff_s *buff = NULL;
...@@ -687,8 +685,6 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, ...@@ -687,8 +685,6 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
is_err &= ~0x18U; is_err &= ~0x18U;
is_err &= ~0x04U; is_err &= ~0x04U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
if (is_err || rxd_wb->type & 0x1000U) { if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */ /* status error or DMA error */
buff->is_error = 1U; buff->is_error = 1U;
......
...@@ -654,8 +654,6 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, ...@@ -654,8 +654,6 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring) struct aq_ring_s *ring)
{ {
struct device *ndev = aq_nic_get_dev(ring->aq_nic);
for (; ring->hw_head != ring->sw_tail; for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL; struct aq_ring_buff_s *buff = NULL;
...@@ -697,8 +695,6 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, ...@@ -697,8 +695,6 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_cso_err = 0U; buff->is_cso_err = 0U;
} }
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */ /* MAC error or DMA error */
buff->is_error = 1U; buff->is_error = 1U;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment