Commit 4321de44 authored by Alexander Lobakin, committed by Christoph Hellwig

page_pool: check for DMA sync shortcut earlier

We can save a couple more function calls in the Page Pool code if we
check for dma_need_sync() earlier, just when we test pp->dma_sync.
Move both these checks into an inline wrapper and call the PP wrapper
over the generic DMA sync function only when both are true.
You can't cache the result of dma_need_sync() in &page_pool, as it may
change anytime if an SWIOTLB buffer is allocated or mapped.
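
To make the shortcut concrete, here is a minimal, self-contained userspace C sketch of the pattern this patch applies: an inline wrapper tests the cheap per-pool flag together with a per-call "does the device need syncs?" predicate, and only when both hold does it fall through to the out-of-line sync routine. The names (struct dev, struct pool, dev_need_sync(), pp_sync_for_device()) are hypothetical stand-ins for dma_dev_need_sync() / page_pool_dma_sync_for_device(), not the real kernel API.

/*
 * Standalone sketch of the "check the shortcut earlier" pattern.
 * All names here are hypothetical, not the kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool skip_sync;		/* may flip when e.g. a bounce buffer is mapped */
};

struct pool {
	struct dev *dev;
	bool dma_sync;		/* set once at pool creation time */
};

/* Cheap per-call predicate: must be re-evaluated, never cached in the pool */
static inline bool dev_need_sync(const struct dev *dev)
{
	return !dev->skip_sync;
}

/* Out-of-line "expensive" path, only reached when a sync is really needed */
static void __pp_sync_for_device(const struct pool *pool, unsigned int len)
{
	printf("syncing %u bytes for device\n", len);
}

/* Inline wrapper: both checks happen before any function call is made */
static inline void pp_sync_for_device(const struct pool *pool, unsigned int len)
{
	if (pool->dma_sync && dev_need_sync(pool->dev))
		__pp_sync_for_device(pool, len);
}

int main(void)
{
	struct dev d = { .skip_sync = true };
	struct pool p = { .dev = &d, .dma_sync = true };

	pp_sync_for_device(&p, 64);	/* skipped: device needs no syncs */

	d.skip_sync = false;		/* e.g. a bounce buffer got mapped */
	pp_sync_for_device(&p, 64);	/* now the real sync path runs */

	return 0;
}

Because the predicate is re-evaluated on every call, a later state change (such as a SWIOTLB buffer being allocated or mapped) is picked up immediately; caching its result in the pool would miss exactly that case.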
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 403f11ac
@@ -398,16 +398,26 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 	return page;
 }
 
-static void page_pool_dma_sync_for_device(struct page_pool *pool,
-					   struct page *page,
-					   unsigned int dma_sync_size)
+static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
+					     struct page *page,
+					     u32 dma_sync_size)
 {
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
 	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
 
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
-					 pool->p.offset, dma_sync_size,
-					 pool->p.dma_dir);
+	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
+				     dma_sync_size, pool->p.dma_dir);
+#endif
+}
+
+static __always_inline void
+page_pool_dma_sync_for_device(const struct page_pool *pool,
+			      struct page *page,
+			      u32 dma_sync_size)
+{
+	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
+		__page_pool_dma_sync_for_device(pool, page, dma_sync_size);
 }
 
 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
@@ -429,8 +439,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	if (page_pool_set_dma_addr(page, dma))
 		goto unmap_failed;
 
-	if (pool->dma_sync)
-		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+	page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
 
 	return true;
@@ -699,9 +708,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	if (likely(__page_pool_page_can_be_recycled(page))) {
 		/* Read barrier done in page_ref_count / READ_ONCE */
 
-		if (pool->dma_sync)
-			page_pool_dma_sync_for_device(pool, page,
-						      dma_sync_size);
+		page_pool_dma_sync_for_device(pool, page, dma_sync_size);
 
 		if (allow_direct && in_softirq() &&
 		    page_pool_recycle_in_cache(page, pool))
@@ -812,9 +819,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
 		return NULL;
 
 	if (__page_pool_page_can_be_recycled(page)) {
-		if (pool->dma_sync)
-			page_pool_dma_sync_for_device(pool, page, -1);
+		page_pool_dma_sync_for_device(pool, page, -1);
 
 		return page;
 	}