Commit 6960f7e3 authored by David S. Miller

Merge branch 'page_pool-followup-changes-to-restore-tracepoint-features'

Jesper Dangaard Brouer says:

====================
page_pool: followup changes to restore tracepoint features

This patchset is a followup to Jonathan's patch that does not release the
pool until inflight == 0. That change made page_pool responsible for its
own delayed destruction instead of relying on the xdp memory model.

As the page_pool maintainer, I'm promoting the use of tracepoints to
troubleshoot and to help driver developers verify correctness when
converting a driver to use page_pool. The role of xdp:mem_disconnect has
changed, which broke my bpftrace tools for shutdown verification. With
these changes, the same capabilities are regained.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b95e86d8 832ccf6f
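
To illustrate the capability being restored (this sketch is not part of the
patchset; only the tracepoint and field names are taken from the diff below,
the rest is illustrative), a minimal bpftrace one-liner can watch a pool
drain during driver shutdown. The available events can be listed with
bpftrace -l 'tracepoint:page_pool:*':

  bpftrace -e 'tracepoint:page_pool:page_pool_release {
          printf("pool=0x%llx inflight=%d hold=%u release=%u destroy_cnt=%llu\n",
                 (uint64)args->pool, args->inflight, args->hold,
                 args->release, args->cnt);
  }'

During delayed destruction, inflight should step down towards zero as pages
are returned, while cnt (destroy_cnt in struct page_pool) grows by one each
time the shutdown path scrubs the pool again.
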
@@ -112,6 +112,8 @@ struct page_pool {
          * refcnt serves purpose is to simplify drivers error handling.
          */
         refcount_t user_cnt;
+        u64 destroy_cnt;
 };
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
......
@@ -8,9 +8,10 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 #include <trace/events/mmflags.h>
+#include <net/page_pool.h>
-TRACE_EVENT(page_pool_inflight,
+TRACE_EVENT(page_pool_release,
         TP_PROTO(const struct page_pool *pool,
                  s32 inflight, u32 hold, u32 release),
@@ -22,6 +23,7 @@ TRACE_EVENT(page_pool_inflight,
                 __field(s32, inflight)
                 __field(u32, hold)
                 __field(u32, release)
+                __field(u64, cnt)
         ),
         TP_fast_assign(
@@ -29,10 +31,12 @@ TRACE_EVENT(page_pool_inflight,
                 __entry->inflight = inflight;
                 __entry->hold = hold;
                 __entry->release = release;
+                __entry->cnt = pool->destroy_cnt;
         ),
-        TP_printk("page_pool=%p inflight=%d hold=%u release=%u",
-                  __entry->pool, __entry->inflight, __entry->hold, __entry->release)
+        TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
+                  __entry->pool, __entry->inflight, __entry->hold,
+                  __entry->release, __entry->cnt)
 );
 TRACE_EVENT(page_pool_state_release,
@@ -46,16 +50,18 @@ TRACE_EVENT(page_pool_state_release,
                 __field(const struct page_pool *, pool)
                 __field(const struct page *, page)
                 __field(u32, release)
+                __field(unsigned long, pfn)
         ),
         TP_fast_assign(
                 __entry->pool = pool;
                 __entry->page = page;
                 __entry->release = release;
+                __entry->pfn = page_to_pfn(page);
         ),
-        TP_printk("page_pool=%p page=%p release=%u",
-                  __entry->pool, __entry->page, __entry->release)
+        TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+                  __entry->pool, __entry->page, __entry->pfn, __entry->release)
 );
 TRACE_EVENT(page_pool_state_hold,
@@ -69,16 +75,18 @@ TRACE_EVENT(page_pool_state_hold,
                 __field(const struct page_pool *, pool)
                 __field(const struct page *, page)
                 __field(u32, hold)
+                __field(unsigned long, pfn)
         ),
         TP_fast_assign(
                 __entry->pool = pool;
                 __entry->page = page;
                 __entry->hold = hold;
+                __entry->pfn = page_to_pfn(page);
         ),
-        TP_printk("page_pool=%p page=%p hold=%u",
-                  __entry->pool, __entry->page, __entry->hold)
+        TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+                  __entry->pool, __entry->page, __entry->pfn, __entry->hold)
 );
 #endif /* _TRACE_PAGE_POOL_H */
......
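
The pfn field added above identifies the page directly in the trace output.
As a rough sketch of what that enables (not taken from the author's actual
bpftrace tools; the @held map name is arbitrary), a script can track which
pages a pool is still accountable for:

  bpftrace -e '
  tracepoint:page_pool:page_pool_state_hold {
          @held[args->pool, args->pfn] = 1;
  }
  tracepoint:page_pool:page_pool_state_release {
          delete(@held[args->pool, args->pfn]);
  }'

On exit (Ctrl-C), bpftrace prints the remaining @held entries, i.e. pages a
pool still counts as inflight: held but not yet released back to the page
allocator.
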
@@ -200,7 +200,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
         inflight = _distance(hold_cnt, release_cnt);
-        trace_page_pool_inflight(pool, inflight, hold_cnt, release_cnt);
+        trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
         WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
         return inflight;
@@ -349,10 +349,13 @@ static void page_pool_free(struct page_pool *pool)
         kfree(pool);
 }
-static void page_pool_scrub(struct page_pool *pool)
+static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 {
         struct page *page;
+        if (pool->destroy_cnt)
+                return;
         /* Empty alloc cache, assume caller made sure this is
          * no-longer in use, and page_pool_alloc_pages() cannot be
          * call concurrently.
@@ -361,6 +364,12 @@ static void page_pool_scrub(struct page_pool *pool)
                 page = pool->alloc.cache[--pool->alloc.count];
                 __page_pool_return_page(pool, page);
         }
+}
+static void page_pool_scrub(struct page_pool *pool)
+{
+        page_pool_empty_alloc_cache_once(pool);
+        pool->destroy_cnt++;
         /* No more consumers should exist, but producers could still
          * be in-flight.
......
@@ -73,11 +73,6 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
         /* Allow this ID to be reused */
         ida_simple_remove(&mem_id_pool, xa->mem.id);
-        /* Poison memory */
-        xa->mem.id = 0xFFFF;
-        xa->mem.type = 0xF0F0;
-        xa->allocator = (void *)0xDEAD9001;
         kfree(xa);
 }
......