Commit 09d96ee5 authored by Yunsheng Lin, committed by Jakub Kicinski

page_pool: remove PP_FLAG_PAGE_FRAG

PP_FLAG_PAGE_FRAG is no longer needed now that pp_frag_count
handling is unified and page_pool_alloc_frag() is supported on
32-bit arches with 64-bit DMA, so remove it.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231020095952.11055-3-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 58d53d8f
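
For context, the usage model after this patch looks like the sketch below
(illustrative only, not part of the diff; the function names, pool size and
fragment size are invented): a pool created with just the DMA flags can feed
page_pool_alloc_frag() directly, because the frag path no longer depends on a
creation-time flag.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/page_pool/helpers.h>

/* Create a pool the same way the converted drivers below do: only the
 * DMA flags are set, no frag-specific flag is required any more.
 */
static struct page_pool *example_create_pool(struct device *dev)
{
        struct page_pool_params pp = {
                .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order          = 0,
                .pool_size      = 256,  /* illustrative ring size */
                .nid            = NUMA_NO_NODE,
                .dev            = dev,
                .dma_dir        = DMA_FROM_DEVICE,
                .max_len        = PAGE_SIZE,
        };

        return page_pool_create(&pp);   /* ERR_PTR() on failure */
}

/* Sub-allocate a fragment; after this patch the call works on any pool,
 * since the PP_FLAG_PAGE_FRAG check is gone from page_pool_alloc_frag().
 */
static void *example_alloc_frag(struct page_pool *pool, dma_addr_t *dma)
{
        unsigned int offset;
        struct page *page;

        page = page_pool_alloc_frag(pool, &offset, 1024, GFP_ATOMIC);
        if (!page)
                return NULL;

        *dma = page_pool_get_dma_addr(page) + offset;
        return page_address(page) + offset;
}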

drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3302,8 +3302,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
         pp.dma_dir = bp->rx_dir;
         pp.max_len = PAGE_SIZE;
         pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
-                pp.flags |= PP_FLAG_PAGE_FRAG;
         rxr->page_pool = page_pool_create(&pp);
         if (IS_ERR(rxr->page_pool)) {

drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4940,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
 {
         struct page_pool_params pp_params = {
-                .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
-                         PP_FLAG_DMA_SYNC_DEV,
+                .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                 .order = hns3_page_order(ring),
                 .pool_size = ring->desc_num * hns3_buf_size(ring) /
                                 (PAGE_SIZE << hns3_page_order(ring)),

drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -595,9 +595,6 @@ static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
                 .offset = 0,
         };
 
-        if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048)
-                pp.flags |= PP_FLAG_PAGE_FRAG;
-
         return page_pool_create(&pp);
 }

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1404,7 +1404,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
         }
 
         pp_params.order = get_order(buf_size);
-        pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+        pp_params.flags = PP_FLAG_DMA_MAP;
         pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
         pp_params.nid = NUMA_NO_NODE;
         pp_params.dev = pfvf->dev;

drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -897,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                 struct page_pool_params pp_params = { 0 };
 
                 pp_params.order = 0;
-                pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+                pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                 pp_params.pool_size = pool_size;
                 pp_params.nid = node;
                 pp_params.dev = rq->pdev;

drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -570,7 +570,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
 {
         struct page_pool_params pp_params = {
                 .order = 0,
-                .flags = PP_FLAG_PAGE_FRAG,
+                .flags = 0,
                 .nid = NUMA_NO_NODE,
                 .dev = dev->dma_dev,
         };

include/net/page_pool/types.h
@@ -17,10 +17,8 @@
  * Please note DMA-sync-for-CPU is still
  * device driver responsibility
  */
-#define PP_FLAG_PAGE_FRAG       BIT(2) /* for page frag feature */
 #define PP_FLAG_ALL             (PP_FLAG_DMA_MAP |\
-                                 PP_FLAG_DMA_SYNC_DEV |\
-                                 PP_FLAG_PAGE_FRAG)
+                                 PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack

@@ -45,7 +43,7 @@ struct pp_alloc_cache {
 /**
  * struct page_pool_params - page pool parameters
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
  * @order: 2^order pages on allocation
  * @pool_size: size of the ptr_ring
  * @nid: NUMA node id to allocate from pages from

net/core/page_pool.c
@@ -756,8 +756,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
         unsigned int max_size = PAGE_SIZE << pool->p.order;
         struct page *page = pool->frag_page;
 
-        if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
-                    size > max_size))
+        if (WARN_ON(size > max_size))
                 return NULL;
 
         size = ALIGN(size, dma_get_cache_alignment());

net/core/skbuff.c
@@ -5765,7 +5765,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
         /* In general, avoid mixing page_pool and non-page_pool allocated
          * pages within the same SKB. Additionally avoid dealing with clones
          * with page_pool pages, in case the SKB is using page_pool fragment
-         * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+         * references (page_pool_alloc_frag()). Since we only take full page
          * references for cloned SKBs at the moment that would result in
          * inconsistent reference counts.
          * In theory we could take full references if @from is cloned and
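
For reference, the guard this comment documents looks roughly like the
following in skb_try_coalesce() of this era (a condensed sketch of mainline
code, not part of this diff):

#include <linux/skbuff.h>

/* Refuse to coalesce when page_pool and non-page_pool pages would mix in
 * one SKB, or when @from is a clone carrying page_pool (possibly frag)
 * references that cannot be duplicated as plain page references.
 */
static bool example_coalesce_allowed(const struct sk_buff *to,
                                     const struct sk_buff *from)
{
        if (to->pp_recycle != from->pp_recycle ||
            (from->pp_recycle && skb_cloned(from)))
                return false;

        return true;
}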