Commit df18948d authored by Jakub Kicinski

Merge branch 'device-memory-tcp'

Prep patches for Device Memory TCP

Pick up a couple of prep patches for Device Memory TCP which
stand on their own.

Link: https://patch.msgid.link/20240628003253.1694510-1-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents ac263276 07c3cc51
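
The common thread in the hunks below is netmem_ref: an opaque, sparse-checked handle that today always carries a struct page pointer, but leaves room for non-page memory providers (such as device memory) later. A minimal sketch of the pattern, assuming nothing beyond what the hunks themselves show (the real definitions live in include/net/netmem.h):

	/* Sketch of the netmem_ref pattern used throughout this series.
	 * The __force casts below are the only sanctioned conversion
	 * points between the handle and the underlying struct page.
	 */
	typedef unsigned long __bitwise netmem_ref;

	static inline struct page *netmem_to_page(netmem_ref netmem)
	{
		return (__force struct page *)netmem;
	}

	static inline netmem_ref page_to_netmem(struct page *page)
	{
		return (__force netmem_ref)page;
	}

Because the handle is __bitwise, sparse flags any direct mixing of netmem_ref and struct page *, which keeps the conversion points explicit and auditable.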
@@ -32,13 +32,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
 }
 
-bool napi_pp_put_page(struct page *page);
+bool napi_pp_put_page(netmem_ref netmem);
 
 static inline void
 skb_page_unref(struct page *page, bool recycle)
 {
 #ifdef CONFIG_PAGE_POOL
-	if (recycle && napi_pp_put_page(page))
+	if (recycle && napi_pp_put_page(page_to_netmem(page)))
 		return;
 #endif
 	put_page(page);
...
@@ -38,4 +38,19 @@ static inline netmem_ref page_to_netmem(struct page *page)
 	return (__force netmem_ref)page;
 }
 
+static inline int netmem_ref_count(netmem_ref netmem)
+{
+	return page_ref_count(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_to_pfn(netmem_ref netmem)
+{
+	return page_to_pfn(netmem_to_page(netmem));
+}
+
+static inline netmem_ref netmem_compound_head(netmem_ref netmem)
+{
+	return page_to_netmem(compound_head(netmem_to_page(netmem)));
+}
+
 #endif /* _NET_NETMEM_H */
@@ -55,6 +55,8 @@
 #include <linux/dma-mapping.h>
 #include <net/page_pool/types.h>
+#include <net/net_debug.h>
+#include <net/netmem.h>
 
 #ifdef CONFIG_PAGE_POOL_STATS
 /* Deprecated driver-facing API, use netlink instead */
@@ -212,6 +214,11 @@ page_pool_get_dma_dir(const struct page_pool *pool)
 	return pool->p.dma_dir;
 }
 
+static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
+{
+	atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr);
+}
+
 /**
  * page_pool_fragment_page() - split a fresh page into fragments
  * @page: page to split
@@ -232,11 +239,12 @@ page_pool_get_dma_dir(const struct page_pool *pool)
  */
 static inline void page_pool_fragment_page(struct page *page, long nr)
 {
-	atomic_long_set(&page->pp_ref_count, nr);
+	page_pool_fragment_netmem(page_to_netmem(page), nr);
 }
 
-static inline long page_pool_unref_page(struct page *page, long nr)
+static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
 {
+	struct page *page = netmem_to_page(netmem);
 	long ret;
 
 	/* If nr == pp_ref_count then we have cleared all remaining
@@ -279,15 +287,41 @@ static inline long page_pool_unref_page(struct page *page, long nr)
 	return ret;
 }
 
+static inline long page_pool_unref_page(struct page *page, long nr)
+{
+	return page_pool_unref_netmem(page_to_netmem(page), nr);
+}
+
+static inline void page_pool_ref_netmem(netmem_ref netmem)
+{
+	atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count);
+}
+
 static inline void page_pool_ref_page(struct page *page)
 {
-	atomic_long_inc(&page->pp_ref_count);
+	page_pool_ref_netmem(page_to_netmem(page));
 }
 
-static inline bool page_pool_is_last_ref(struct page *page)
+static inline bool page_pool_is_last_ref(netmem_ref netmem)
 {
 	/* If page_pool_unref_page() returns 0, we were the last user */
-	return page_pool_unref_page(page, 1) == 0;
+	return page_pool_unref_netmem(netmem, 1) == 0;
+}
+
+static inline void page_pool_put_netmem(struct page_pool *pool,
+					netmem_ref netmem,
+					unsigned int dma_sync_size,
+					bool allow_direct)
+{
+	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+	 */
+#ifdef CONFIG_PAGE_POOL
+	if (!page_pool_is_last_ref(netmem))
+		return;
+
+	page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
+#endif
 }
 
 /**
@@ -308,15 +342,15 @@ static inline void page_pool_put_page(struct page_pool *pool,
 					  unsigned int dma_sync_size,
 					  bool allow_direct)
 {
-	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
-	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
-	 */
-#ifdef CONFIG_PAGE_POOL
-	if (!page_pool_is_last_ref(page))
-		return;
-
-	page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
-#endif
+	page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
+			     allow_direct);
+}
+
+static inline void page_pool_put_full_netmem(struct page_pool *pool,
+					     netmem_ref netmem,
+					     bool allow_direct)
+{
+	page_pool_put_netmem(pool, netmem, -1, allow_direct);
 }
 
 /**
@@ -331,7 +365,7 @@ static inline void page_pool_put_page(struct page_pool *pool,
 static inline void page_pool_put_full_page(struct page_pool *pool,
 					   struct page *page, bool allow_direct)
 {
-	page_pool_put_page(pool, page, -1, allow_direct);
+	page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
 }
 
 /**
@@ -365,6 +399,18 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
 	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
 }
 
+static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
+{
+	struct page *page = netmem_to_page(netmem);
+	dma_addr_t ret = page->dma_addr;
+
+	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+		ret <<= PAGE_SHIFT;
+
+	return ret;
+}
+
 /**
  * page_pool_get_dma_addr() - Retrieve the stored DMA address.
  * @page: page allocated from a page pool
@@ -374,16 +420,14 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
  */
 static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
 {
-	dma_addr_t ret = page->dma_addr;
-
-	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
-		ret <<= PAGE_SHIFT;
-
-	return ret;
+	return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
 }
 
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem,
+						 dma_addr_t addr)
 {
+	struct page *page = netmem_to_page(netmem);
+
 	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
 		page->dma_addr = addr >> PAGE_SHIFT;
@@ -419,6 +463,11 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
 				      page_pool_get_dma_dir(pool));
 }
 
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
+}
+
 static inline bool page_pool_put(struct page_pool *pool)
 {
 	return refcount_dec_and_test(&pool->user_cnt);
...
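
One subtlety in the DMA-address helpers above: page->dma_addr is an unsigned long, so 32-bit architectures with 64-bit dma_addr_t can only store page-aligned addresses, kept right-shifted by PAGE_SHIFT. A standalone illustration of the round trip; the helper name and free-standing form are ours, not the kernel's:

	/* Illustrative only: mirrors the packing done by
	 * page_pool_set_dma_addr_netmem()/page_pool_get_dma_addr_netmem().
	 * Returns true when the address cannot be stored losslessly,
	 * matching the kernel helper's failure convention.
	 */
	static bool pack_dma_addr(unsigned long *slot, dma_addr_t addr)
	{
		if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
			*slot = addr >> PAGE_SHIFT;
			/* Lossy if low bits below PAGE_SHIFT were set or
			 * high bits fell off the unsigned long.
			 */
			return addr != ((dma_addr_t)*slot << PAGE_SHIFT);
		}
		*slot = addr;
		return false;
	}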
@@ -6,6 +6,7 @@
 #include <linux/dma-direction.h>
 #include <linux/ptr_ring.h>
 #include <linux/types.h>
+#include <net/netmem.h>
 
 #define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
 					* map/unmap
@@ -40,7 +41,7 @@
 #define PP_ALLOC_CACHE_REFILL	64
 struct pp_alloc_cache {
 	u32 count;
-	struct page *cache[PP_ALLOC_CACHE_SIZE];
+	netmem_ref cache[PP_ALLOC_CACHE_SIZE];
 };
 
 /**
@@ -73,7 +74,7 @@ struct page_pool_params {
 	struct net_device *netdev;
 	unsigned int	flags;
 /* private: used by test code only */
-	void (*init_callback)(struct page *page, void *arg);
+	void (*init_callback)(netmem_ref netmem, void *arg);
 	void *init_arg;
 );
 };
@@ -151,7 +152,7 @@ struct page_pool {
 	 */
 	__cacheline_group_begin(frag) __aligned(4 * sizeof(long));
 	long frag_users;
-	struct page *frag_page;
+	netmem_ref frag_page;
 	unsigned int frag_offset;
 	__cacheline_group_end(frag);
@@ -220,8 +221,12 @@
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);
 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
 				  unsigned int size, gfp_t gfp);
+netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
+				       unsigned int *offset, unsigned int size,
+				       gfp_t gfp);
 struct page_pool *page_pool_create(const struct page_pool_params *params);
 struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
 					  int cpuid);
@@ -252,6 +257,9 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 #endif
 
+void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
+				  unsigned int dma_sync_size,
+				  bool allow_direct);
 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
 				unsigned int dma_sync_size,
 				bool allow_direct);
...
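
The declarations above give netmem twins for the allocation and free entry points. As a sketch only, a hypothetical driver refill/teardown pair wired to the netmem variants could look like this; struct my_rx_slot and both function names are invented for illustration:

	/* Hypothetical RX buffer refill using the netmem API declared
	 * above; the slot structure and its fields are illustrative.
	 */
	struct my_rx_slot {
		netmem_ref netmem;
		dma_addr_t dma;
		unsigned int offset;
	};

	static int my_rx_refill_slot(struct page_pool *pool,
				     struct my_rx_slot *slot,
				     unsigned int buf_size)
	{
		slot->netmem = page_pool_alloc_frag_netmem(pool, &slot->offset,
							   buf_size, GFP_ATOMIC);
		if (!slot->netmem)
			return -ENOMEM;

		/* DMA address comes from the pool, not from dma_map_*(). */
		slot->dma = page_pool_get_dma_addr_netmem(slot->netmem) +
			    slot->offset;
		return 0;
	}

	static void my_rx_free_slot(struct page_pool *pool,
				    struct my_rx_slot *slot)
	{
		/* Drop the pool's reference; allow_direct=false is the
		 * safe choice outside the pool's own NAPI context.
		 */
		page_pool_put_full_netmem(pool, slot->netmem, false);
		slot->netmem = 0;
	}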
@@ -42,51 +42,53 @@ TRACE_EVENT(page_pool_release,
 TRACE_EVENT(page_pool_state_release,
 
 	TP_PROTO(const struct page_pool *pool,
-		 const struct page *page, u32 release),
+		 netmem_ref netmem, u32 release),
 
-	TP_ARGS(pool, page, release),
+	TP_ARGS(pool, netmem, release),
 
 	TP_STRUCT__entry(
 		__field(const struct page_pool *,	pool)
-		__field(const struct page *,		page)
+		__field(unsigned long,			netmem)
 		__field(u32,				release)
 		__field(unsigned long,			pfn)
 	),
 
 	TP_fast_assign(
 		__entry->pool		= pool;
-		__entry->page		= page;
+		__entry->netmem		= (__force unsigned long)netmem;
 		__entry->release	= release;
-		__entry->pfn		= page_to_pfn(page);
+		__entry->pfn		= netmem_to_pfn(netmem);
 	),
 
-	TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
-		  __entry->pool, __entry->page, __entry->pfn, __entry->release)
+	TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u",
+		  __entry->pool, (void *)__entry->netmem,
+		  __entry->pfn, __entry->release)
 );
 
 TRACE_EVENT(page_pool_state_hold,
 
 	TP_PROTO(const struct page_pool *pool,
-		 const struct page *page, u32 hold),
+		 netmem_ref netmem, u32 hold),
 
-	TP_ARGS(pool, page, hold),
+	TP_ARGS(pool, netmem, hold),
 
 	TP_STRUCT__entry(
 		__field(const struct page_pool *,	pool)
-		__field(const struct page *,		page)
+		__field(unsigned long,			netmem)
 		__field(u32,				hold)
 		__field(unsigned long,			pfn)
 	),
 
 	TP_fast_assign(
 		__entry->pool	= pool;
-		__entry->page	= page;
+		__entry->netmem	= (__force unsigned long)netmem;
 		__entry->hold	= hold;
-		__entry->pfn	= page_to_pfn(page);
+		__entry->pfn	= netmem_to_pfn(netmem);
 	),
 
-	TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
-		  __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+	TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
+		  __entry->pool, (void *)__entry->netmem,
+		  __entry->pfn, __entry->hold)
 );
 
 TRACE_EVENT(page_pool_update_nid,
...
@@ -127,9 +127,10 @@ struct xdp_test_data {
 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
 #define TEST_XDP_MAX_BATCH 256
 
-static void xdp_test_run_init_page(struct page *page, void *arg)
+static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
 {
-	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
+	struct xdp_page_head *head =
+		phys_to_virt(page_to_phys(netmem_to_page(netmem)));
 	struct xdp_buff *new_ctx, *orig_ctx;
 	u32 headroom = XDP_PACKET_HEADROOM;
 	struct xdp_test_data *xdp = arg;
...
@@ -1015,8 +1015,10 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
 EXPORT_SYMBOL(skb_cow_data_for_xdp);
 
 #if IS_ENABLED(CONFIG_PAGE_POOL)
-bool napi_pp_put_page(struct page *page)
+bool napi_pp_put_page(netmem_ref netmem)
 {
+	struct page *page = netmem_to_page(netmem);
+
 	page = compound_head(page);
 
 	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
@@ -1029,7 +1031,7 @@ bool napi_pp_put_page(struct page *page)
 	if (unlikely(!is_pp_page(page)))
 		return false;
 
-	page_pool_put_full_page(page->pp, page, false);
+	page_pool_put_full_netmem(page->pp, page_to_netmem(page), false);
 
 	return true;
 }
@@ -1040,7 +1042,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
 {
 	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
 		return false;
-	return napi_pp_put_page(virt_to_page(data));
+	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
 }
 
 /**
...
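
With napi_pp_put_page() now taking a netmem_ref, legacy struct page callers convert at the boundary, exactly as skb_page_unref() and skb_pp_recycle() do above. Any similar caller follows the same pattern, sketched here with an illustrative function name:

	/* Illustrative wrapper showing the conversion boundary: try the
	 * page_pool recycling fast path first, fall back to a plain page
	 * reference drop, mirroring skb_page_unref() in the first hunk.
	 */
	static void my_frag_unref(struct page *page, bool recycle)
	{
	#ifdef CONFIG_PAGE_POOL
		if (recycle && napi_pp_put_page(page_to_netmem(page)))
			return;
	#endif
		put_page(page);
	}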
@@ -2,9 +2,12 @@
 SUBDIRS = lib generated samples
 
-all: $(SUBDIRS)
+all: $(SUBDIRS) libynl.a
 
 samples: | lib generated
+libynl.a: | lib generated
+	@echo -e "\tAR $@"
+	@ar rcs $@ lib/ynl.o generated/*-user.o
 
 $(SUBDIRS):
 	@if [ -f "$@/Makefile" ] ; then \
@@ -17,5 +20,6 @@ clean distclean:
 			$(MAKE) -C $$dir $@; \
 		fi \
 	done
+	rm -f libynl.a
 
 .PHONY: all clean distclean $(SUBDIRS)
@@ -14,7 +14,9 @@ include $(wildcard *.d)
 all: ynl.a
 
 ynl.a: $(OBJS)
-	ar rcs $@ $(OBJS)
+	@echo -e "\tAR $@"
+	@ar rcs $@ $(OBJS)
 
 clean:
 	rm -f *.o *.d *~
 	rm -rf __pycache__
...
# SPDX-License-Identifier: GPL-2.0

# YNL selftest build snippet

# Inputs:
#
# YNL_GENS:      families we need in the selftests
# YNL_PROGS:     TEST_PROGS which need YNL (TODO, none exist, yet)
# YNL_GEN_FILES: TEST_GEN_FILES which need YNL

YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES))

$(YNL_OUTPUTS): $(OUTPUT)/libynl.a
$(YNL_OUTPUTS): CFLAGS += \
	-I$(top_srcdir)/usr/include/ $(KHDR_INCLUDES) \
	-I$(top_srcdir)/tools/net/ynl/lib/ \
	-I$(top_srcdir)/tools/net/ynl/generated/

$(OUTPUT)/libynl.a:
	$(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl GENS="$(YNL_GENS)" libynl.a
	$(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a