Commit 86b05508 authored by Somnath Kotur, committed by Jakub Kicinski

bnxt_en: Use the unified RX page pool buffers for XDP and non-XDP

Convert to use the page pool buffers for the aggregation ring when
running in non-XDP mode.  This simplifies the driver and we benefit
from the recycling of pages.  Adjust the page pool size to account
for the aggregation ring size.
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230817231911.165035-2-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c6cfc6cd
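For background, the page pool API keeps a per-ring cache of DMA-mapped pages and returns freed pages to that cache rather than to the page allocator, which is what makes the unified buffer scheme cheap. A minimal sketch of the API involved, independent of this driver (simplified; error handling elided; assumes the <net/page_pool.h> header layout of this kernel era):

    #include <net/page_pool.h>

    static struct page_pool *example_create_pool(struct device *dev,
                                                 unsigned int pool_size)
    {
            struct page_pool_params pp = { 0 };

            pp.pool_size = pool_size;       /* pages the pool may cache */
            pp.dev = dev;                   /* device used for DMA mapping */
            pp.dma_dir = DMA_FROM_DEVICE;
            pp.flags = PP_FLAG_DMA_MAP;     /* pool DMA-maps pages itself */

            return page_pool_create(&pp);   /* ERR_PTR() on failure */
    }

    static void example_rx_cycle(struct page_pool *pool)
    {
            struct page *page = page_pool_dev_alloc_pages(pool);

            if (!page)
                    return;
            /* Returns the page to the pool's cache instead of freeing it,
             * skipping a fresh allocation and DMA map on the next fill.
             */
            page_pool_recycle_direct(pool, page);
    }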
@@ -877,48 +877,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	struct rx_bd *rxbd =
 		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
-	struct pci_dev *pdev = bp->pdev;
 	struct page *page;
 	dma_addr_t mapping;
 	u16 sw_prod = rxr->rx_sw_agg_prod;
 	unsigned int offset = 0;
 
-	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-		if (!page)
-			return -ENOMEM;
-	} else {
-		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-			page = rxr->rx_page;
-			if (!page) {
-				page = alloc_page(gfp);
-				if (!page)
-					return -ENOMEM;
-				rxr->rx_page = page;
-				rxr->rx_page_offset = 0;
-			}
-			offset = rxr->rx_page_offset;
-			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-			if (rxr->rx_page_offset == PAGE_SIZE)
-				rxr->rx_page = NULL;
-			else
-				get_page(page);
-		} else {
-			page = alloc_page(gfp);
-			if (!page)
-				return -ENOMEM;
-		}
+	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
-		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
-		if (dma_mapping_error(&pdev->dev, mapping)) {
-			__free_page(page);
-			return -EIO;
-		}
-	}
+	if (!page)
+		return -ENOMEM;
 
 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
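Both modes now funnel through __bnxt_alloc_rx_page(), whose body is outside this hunk. A rough sketch of what a page-pool-backed allocator of this shape does (illustrative only; the real helper in bnxt.c may differ in detail):

    /* Sketch of a page-pool-backed RX allocator; not the actual
     * __bnxt_alloc_rx_page() body.
     */
    static struct page *sketch_alloc_rx_page(struct page_pool *pool,
                                             dma_addr_t *mapping,
                                             unsigned int *offset)
    {
            struct page *page;

            if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                    /* Carve a BNXT_RX_PAGE_SIZE fragment out of a larger
                     * page; the pool tracks the sub-page offset, which is
                     * what replaces the removed rx_page/rx_page_offset
                     * bookkeeping.
                     */
                    page = page_pool_dev_alloc_frag(pool, offset,
                                                    BNXT_RX_PAGE_SIZE);
            } else {
                    page = page_pool_dev_alloc_pages(pool);
                    *offset = 0;
            }
            if (!page)
                    return NULL;

            /* With PP_FLAG_DMA_MAP the pool has already mapped the page,
             * so the removed dma_map_page_attrs() call is unnecessary.
             */
            *mapping = page_pool_get_dma_addr(page) + *offset;
            return page;
    }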
@@ -1204,6 +1171,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
 					     agg_bufs, tpa, NULL);
 	if (!total_frag_len) {
+		skb_mark_for_recycle(skb);
 		dev_kfree_skb(skb);
 		return NULL;
 	}
@@ -1794,6 +1762,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
 		return;
 	}
 	skb_record_rx_queue(skb, bnapi->index);
+	skb_mark_for_recycle(skb);
 	napi_gro_receive(&bnapi->napi, skb);
 }
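Both skb_mark_for_recycle() additions follow from the conversion: every skb built on these paths now carries page-pool-backed fragments, including the one freed on the error path above, so each must be flagged before it is freed or handed to the stack so that its pages return to the pool rather than escaping to the page allocator. The helper itself is only a flag set; roughly, as defined in include/linux/skbuff.h in kernels of this era (under CONFIG_PAGE_POOL):

    static inline void skb_mark_for_recycle(struct sk_buff *skb)
    {
            /* skb teardown checks this bit and hands page-pool-backed
             * fragments back to their originating pool.
             */
            skb->pp_recycle = 1;
    }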
@@ -3002,30 +2971,16 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		if (BNXT_RX_PAGE_MODE(bp)) {
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-					     DMA_ATTR_WEAK_ORDERING);
-			rx_agg_buf->page = NULL;
-			__clear_bit(i, rxr->rx_agg_bmap);
-
-			page_pool_recycle_direct(rxr->page_pool, page);
-		} else {
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
-			rx_agg_buf->page = NULL;
-			__clear_bit(i, rxr->rx_agg_bmap);
-
-			__free_page(page);
-		}
+		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+				     DMA_ATTR_WEAK_ORDERING);
+		rx_agg_buf->page = NULL;
+		__clear_bit(i, rxr->rx_agg_bmap);
+
+		page_pool_recycle_direct(rxr->page_pool, page);
 	}
 
 skip_rx_agg_free:
-	if (rxr->rx_page) {
-		__free_page(rxr->rx_page);
-		rxr->rx_page = NULL;
-	}
 	map = rxr->rx_tpa_idx_map;
 	if (map)
 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
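Note that the unified teardown unmaps with bp->rx_dir rather than a hard-coded DMA_FROM_DEVICE, because the DMA direction depends on the mode. Roughly, paraphrasing the driver's mode setup (bnxt_set_rx_skb_mode(), not shown in this patch):

    /* XDP programs may write into RX buffers, so page mode maps them
     * bidirectionally; otherwise the device only writes to them.
     */
    bp->rx_dir = page_mode ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;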
@@ -3244,7 +3199,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 {
 	struct page_pool_params pp = { 0 };
 
-	pp.pool_size = bp->rx_ring_size;
+	pp.pool_size = bp->rx_agg_ring_size;
+	if (BNXT_RX_PAGE_MODE(bp))
+		pp.pool_size += bp->rx_ring_size;
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
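With this sizing, the pool covers every buffer the ring set can hold. For example, with a hypothetical rx_agg_ring_size of 2048, the pool caches 2048 pages in non-XDP mode; with a hypothetical rx_ring_size of 1024 on top of that, it caches 3072 pages in XDP (page) mode, where the normal RX ring also draws its buffers from the pool.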
@@ -919,9 +919,6 @@ struct bnxt_rx_ring_info {
 	unsigned long		*rx_agg_bmap;
 	u16			rx_agg_bmap_size;
 
-	struct page		*rx_page;
-	unsigned int		rx_page_offset;
-
 	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];