Commit 7a36e491 authored by Kevin Hao, committed by Jakub Kicinski

octeontx2-pf: Use the napi_alloc_frag() to alloc the pool buffers

In the current code, the octeontx2 driver uses its own method to
allocate the pool buffers, but there are some issues in this
implementation.
1. We have to call otx2_get_page() for each allocation cycle, and
   this is pretty error prone. As far as I can see, there is no
   invocation of otx2_get_page() in otx2_pool_refill_task(), which
   leaves the allocated pages with a wrong refcount, so they may be
   freed wrongly.
2. It wastes memory. For example, if we only receive one packet in a
   NAPI RX cycle and then allocate a 2K buffer with otx2_alloc_rbuf()
   to refill the pool buffers, the remaining area of the allocated
   page is wasted. On a kernel with 64K pages, a 62K area is wasted.

IMHO it is really unnecessary to implement our own method for buffer
allocation; we can reuse napi_alloc_frag() to simplify the code.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent e7bb7ece
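For readers unfamiliar with the page-fragment allocator, here is a
minimal sketch (not part of this commit) of the pattern the driver
switches to. The kernel APIs napi_alloc_frag(), page_frag_free(),
dma_map_single_attrs() and dma_mapping_error() are real; the function
name sketch_alloc_rbuf and its dev/rbsize parameters are purely
illustrative:

static dma_addr_t sketch_alloc_rbuf(struct device *dev, u16 rbsize)
{
	dma_addr_t iova;
	u8 *buf;

	/* Carve rbsize bytes out of the shared per-CPU page-fragment
	 * cache.  This must run with bottom halves disabled.
	 */
	buf = napi_alloc_frag(rbsize);
	if (unlikely(!buf))
		return -ENOMEM;

	/* Map the fragment for device RX; the CPU sync can be skipped
	 * because the CPU has not written to the buffer yet.
	 */
	iova = dma_map_single_attrs(dev, buf, rbsize, DMA_FROM_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(dev, iova))) {
		page_frag_free(buf);	/* drop the fragment's page ref */
		return -ENOMEM;
	}

	return iova;
}

Each fragment pins its backing page via a reference taken inside
napi_alloc_frag() itself, so there is no per-driver refcount
bookkeeping and no page is left mostly unused after a refill cycle.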
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
 		     (pfvf->hw.cq_ecount_wait - 1));
 }
 
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			   gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
 {
 	dma_addr_t iova;
+	u8 *buf;
 
-	/* Check if request can be accommodated in previous allocated page */
-	if (pool->page && ((pool->page_offset + pool->rbsize) <=
-	    (PAGE_SIZE << pool->rbpage_order))) {
-		pool->pageref++;
-		goto ret;
-	}
-
-	otx2_get_page(pool);
-
-	/* Allocate a new page */
-	pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				 pool->rbpage_order);
-	if (unlikely(!pool->page))
+	buf = napi_alloc_frag(pool->rbsize);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	pool->page_offset = 0;
-ret:
-	iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
-				      pool->rbsize, DMA_FROM_DEVICE);
-	if (!iova) {
-		if (!pool->page_offset)
-			__free_pages(pool->page, pool->rbpage_order);
-		pool->page = NULL;
+	iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+		page_frag_free(buf);
 		return -ENOMEM;
 	}
 
-	pool->page_offset += pool->rbsize;
 	return iova;
 }
 
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+	dma_addr_t addr;
+
+	local_bh_disable();
+	addr = __otx2_alloc_rbuf(pfvf, pool);
+	local_bh_enable();
+
+	return addr;
+}
+
 void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
 {
 	struct otx2_nic *pfvf = netdev_priv(netdev);
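A note on the split above: napi_alloc_frag() works on a per-CPU
fragment cache, so it must be called with bottom halves disabled. The
NAPI RX handler already runs in softirq context and can call
__otx2_alloc_rbuf() directly; the process-context callers (pool setup
and the refill worker) use the otx2_alloc_rbuf() wrapper, which
brackets the allocation with local_bh_disable()/local_bh_enable().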
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 	free_ptrs = cq->pool_ptrs;
 
 	while (cq->pool_ptrs) {
-		bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+		bufptr = otx2_alloc_rbuf(pfvf, rbpool);
 		if (bufptr <= 0) {
 			/* Schedule a WQ if we fails to free atleast half of the
 			 * pointers else enable napi for this RQ.
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 		return err;
 
 	pool->rbsize = buf_size;
-	pool->rbpage_order = get_order(buf_size);
 
 	/* Initialize this pool's context via AF */
 	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 			return -ENOMEM;
 
 		for (ptr = 0; ptr < num_sqbs; ptr++) {
-			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+			bufptr = otx2_alloc_rbuf(pfvf, pool);
 			if (bufptr <= 0)
 				return bufptr;
 			otx2_aura_freeptr(pfvf, pool_id, bufptr);
 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
 		}
-		otx2_get_page(pool);
 	}
 
 	return 0;
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
-			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+			bufptr = otx2_alloc_rbuf(pfvf, pool);
 			if (bufptr <= 0)
 				return bufptr;
 			otx2_aura_freeptr(pfvf, pool_id,
 					  bufptr + OTX2_HEAD_ROOM);
 		}
-		otx2_get_page(pool);
 	}
 
 	return 0;
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -434,18 +434,6 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
 			 otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
 }
 
-/* Update page ref count */
-static inline void otx2_get_page(struct otx2_pool *pool)
-{
-	if (!pool->page)
-		return;
-
-	if (pool->pageref)
-		page_ref_add(pool->page, pool->pageref);
-
-	pool->pageref = 0;
-	pool->page = NULL;
-}
-
 static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
 {
 	if (type == AURA_NIX_SQ)
@@ -589,8 +577,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
 int otx2_txsch_alloc(struct otx2_nic *pfvf);
 int otx2_txschq_stop(struct otx2_nic *pfvf);
 void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			   gfp_t gfp);
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -286,7 +286,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 
 	/* Refill pool with new buffers */
 	while (cq->pool_ptrs) {
-		bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+		bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
 		if (unlikely(bufptr <= 0)) {
 			struct refill_work *work;
 			struct delayed_work *dwork;
@@ -304,7 +304,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
-	otx2_get_page(cq->rbpool);
 
 	return processed_cqe;
 }
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -113,11 +113,7 @@ struct otx2_cq_poll {
 struct otx2_pool {
 	struct qmem		*stack;
 	struct qmem		*fc_addr;
-	u8			rbpage_order;
 	u16			rbsize;
-	u32			page_offset;
-	u16			pageref;
-	struct page		*page;
 };
 
 struct otx2_cq_queue {