Commit 2b43470a authored by Björn Töpel, committed by Alexei Starovoitov

xsk: Introduce AF_XDP buffer allocation API

In order to simplify AF_XDP zero-copy enablement for NIC driver
developers, a new AF_XDP buffer allocation API is added. The
implementation is based on a single core (single producer/consumer)
buffer pool for the AF_XDP UMEM.

A buffer is allocated using the xsk_buff_alloc() function and returned
using xsk_buff_free(). When a buffer is disassociated from the pool,
e.g. when it is passed to an AF_XDP socket, it is said to be
released. Currently, the release function is only used by the AF_XDP
internals and is not visible to drivers.

Drivers using this API should register the XDP memory model with the
new MEM_TYPE_XSK_BUFF_POOL type.

The API is defined in net/xdp_sock_drv.h.
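
To make the expected driver-side wiring concrete, here is a hedged
sketch (not part of this patch) of how a zero-copy capable driver
might attach a UMEM, using only the helpers added here plus the
existing xdp_rxq_info_reg_mem_model(); the my_ring structure and
function names are illustrative assumptions:

#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

/* Hypothetical ring state; real drivers carry their own equivalents. */
struct my_ring {
        struct device *dev;             /* device used for DMA mapping */
        struct xdp_rxq_info xdp_rxq;    /* already registered rxq info */
};

static int my_ring_attach_umem(struct my_ring *ring, struct xdp_umem *umem)
{
        int err;

        /* Map the UMEM pages for DMA via the buffer pool. */
        err = xsk_buff_dma_map(umem, ring->dev, 0);
        if (err)
                return err;

        /* Let the pool stamp rxq/mem info into every allocated xdp_buff. */
        xsk_buff_set_rxq_info(umem, &ring->xdp_rxq);

        /* Register the new memory model so the XDP core hands buffers
         * on this rxq back to the pool via xsk_buff_free().
         */
        return xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                          MEM_TYPE_XSK_BUFF_POOL, NULL);
}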

The buffer type is struct xdp_buff, and it follows the lifetime of
regular xdp_buffs, i.e. the lifetime of an xdp_buff is restricted to a
NAPI context. In other words, the API does not replace xdp_frames.
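
As a further illustration of the alloc/free lifetime within NAPI, the
following hedged sketch strings the new helpers together in an RX fast
path; ring_has_free_slots(), post_rx_descriptor() and ring_store_buff()
are made-up driver internals used only to show where each call sits:

/* Refill HW RX descriptors with buffers taken from the pool. */
static void my_ring_refill(struct my_ring *ring, struct xdp_umem *umem)
{
        struct xdp_buff *xdp;
        dma_addr_t dma;

        while (ring_has_free_slots(ring)) {
                xdp = xsk_buff_alloc(umem);     /* consumes the fill ring */
                if (!xdp)
                        break;                  /* fill ring empty, retry later */

                dma = xsk_buff_xdp_get_dma(xdp);
                post_rx_descriptor(ring, dma,
                                   xsk_umem_get_rx_frame_size(umem));
                ring_store_buff(ring, xdp);     /* keep until completion */
        }
}

/* Completion handling for one received frame, still in NAPI context. */
static void my_ring_clean_one(struct xdp_buff *xdp, u32 len, u32 act)
{
        /* The device wrote the frame; sync it for the CPU before XDP runs. */
        xsk_buff_dma_sync_for_cpu(xdp);
        xdp->data_end = xdp->data + len;

        if (act == XDP_DROP)
                xsk_buff_free(xdp);     /* hand the buffer straight back */
        /* On XDP_REDIRECT to an AF_XDP socket, the core releases it. */
}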

In addition to introducing the API and its implementation, the AF_XDP
core is migrated to use the new API.

rfc->v1: Fixed build errors/warnings for m68k and riscv. (kbuild test
         robot)
         Added headroom/chunk size getter. (Maxim/Björn)

v1->v2: Swapped SoBs. (Maxim)

v2->v3: Initialize struct xdp_buff member frame_sz. (Björn)
        Add API to query the DMA address of a frame. (Maxim)
        Do DMA sync for CPU till the end of the frame to handle
        possible growth (frame_sz). (Maxim)
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-6-bjorn.topel@gmail.com
parent 89e4a376
@@ -40,6 +40,7 @@ enum xdp_mem_type {
 	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
 	MEM_TYPE_PAGE_POOL,
 	MEM_TYPE_ZERO_COPY,
+	MEM_TYPE_XSK_BUFF_POOL,
 	MEM_TYPE_MAX,
 };

@@ -119,7 +120,8 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
 	int metasize;
 	int headroom;

-	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
+	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY ||
+	    xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
 		return xdp_convert_zc_to_xdp_frame(xdp);

 	/* Assure headroom is available for storing info */
@@ -31,11 +31,13 @@ struct xdp_umem_fq_reuse {
 struct xdp_umem {
 	struct xsk_queue *fq;
 	struct xsk_queue *cq;
+	struct xsk_buff_pool *pool;
 	struct xdp_umem_page *pages;
 	u64 chunk_mask;
 	u64 size;
 	u32 headroom;
 	u32 chunk_size_nohr;
+	u32 chunk_size;
 	struct user_struct *user;
 	refcount_t users;
 	struct work_struct work;
@@ -7,6 +7,7 @@
 #define _LINUX_XDP_SOCK_DRV_H

 #include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>

 #ifdef CONFIG_XDP_SOCKETS

@@ -101,6 +102,94 @@ static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
 	return umem->chunk_size_nohr;
 }

+static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+{
+	return XDP_PACKET_HEADROOM + umem->headroom;
+}
+
+static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+{
+	return umem->chunk_size;
+}
+
+static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+{
+	return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
+}
+
+static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+					 struct xdp_rxq_info *rxq)
+{
+	xp_set_rxq_info(umem->pool, rxq);
+}
+
+static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+				      unsigned long attrs)
+{
+	xp_dma_unmap(umem->pool, attrs);
+}
+
+static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
+				   unsigned long attrs)
+{
+	return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	return xp_get_dma(xskb);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	return xp_get_frame_dma(xskb);
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+{
+	return xp_alloc(umem->pool);
+}
+
+static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+{
+	return xp_can_alloc(umem->pool, count);
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	xp_free(xskb);
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	return xp_raw_get_dma(umem->pool, addr);
+}
+
+static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+{
+	return xp_raw_get_data(umem->pool, addr);
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	xp_dma_sync_for_cpu(xskb);
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+						    dma_addr_t dma,
+						    size_t size)
+{
+	xp_dma_sync_for_device(umem->pool, dma, size);
+}
+
 #else

 static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
@@ -212,6 +301,81 @@ static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
 	return 0;
 }

+static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+					 struct xdp_rxq_info *rxq)
+{
+}
+
+static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+				      unsigned long attrs)
+{
+}
+
+static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
+				   unsigned long attrs)
+{
+	return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+	return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+	return 0;
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+{
+	return NULL;
+}
+
+static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+{
+	return false;
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	return 0;
+}
+
+static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+{
+	return NULL;
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+{
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+						    dma_addr_t dma,
+						    size_t size)
+{
+}
+
 #endif /* CONFIG_XDP_SOCKETS */

 #endif /* _LINUX_XDP_SOCK_DRV_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct device;
struct page;

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	bool unaligned;
	u64 orig_addr;
	struct list_head free_list_node;
};

/* AF_XDP core. */
struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned);
void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
u64 xp_get_handle(struct xdp_buff_xsk *xskb);
bool xp_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb);
dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb);
void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb);
void xp_dma_sync_for_device(struct xsk_buff_pool *pool, dma_addr_t dma,
			    size_t size);

#endif /* XSK_BUFF_POOL_H_ */
@@ -287,7 +287,8 @@ TRACE_EVENT(xdp_devmap_xmit,
 	FN(PAGE_SHARED)		\
 	FN(PAGE_ORDER0)		\
 	FN(PAGE_POOL)		\
-	FN(ZERO_COPY)
+	FN(ZERO_COPY)		\
+	FN(XSK_BUFF_POOL)

 #define __MEM_TYPE_TP_FN(x) \
 	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
@@ -17,6 +17,7 @@
 #include <net/xdp.h>
 #include <net/xdp_priv.h> /* struct xdp_mem_allocator */
 #include <trace/events/xdp.h>
+#include <net/xdp_sock_drv.h>

 #define REG_STATE_NEW		0x0
 #define REG_STATE_REGISTERED	0x1

@@ -361,7 +362,7 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * of xdp_frames/pages in those cases.
  */
 static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
-			 unsigned long handle)
+			 unsigned long handle, struct xdp_buff *xdp)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;

@@ -390,6 +391,11 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 		xa->zc_alloc->free(xa->zc_alloc, handle);
 		rcu_read_unlock();
+		break;
+	case MEM_TYPE_XSK_BUFF_POOL:
+		/* NB! Only valid from an xdp_buff! */
+		xsk_buff_free(xdp);
+		break;
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
 		break;

@@ -398,19 +404,19 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,

 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
+	__xdp_return(xdpf->data, &xdpf->mem, false, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);

 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
+	__xdp_return(xdpf->data, &xdpf->mem, true, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle, xdp);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o xskmap.o
+obj-$(CONFIG_XDP_SOCKETS) += xsk_buff_pool.o
 obj-$(CONFIG_XDP_SOCKETS_DIAG) += xsk_diag.o
@@ -245,7 +245,7 @@ static void xdp_umem_release(struct xdp_umem *umem)
 	}

 	xsk_reuseq_destroy(umem);
-
+	xp_destroy(umem->pool);
 	xdp_umem_unmap_pages(umem);
 	xdp_umem_unpin_pages(umem);

@@ -390,6 +390,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->size = size;
 	umem->headroom = headroom;
 	umem->chunk_size_nohr = chunk_size - headroom;
+	umem->chunk_size = chunk_size;
 	umem->npgs = size / PAGE_SIZE;
 	umem->pgs = NULL;
 	umem->user = NULL;

@@ -415,11 +416,21 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	}

 	err = xdp_umem_map_pages(umem);
-	if (!err)
-		return 0;
+	if (err)
+		goto out_pages;

-	kvfree(umem->pages);
+	umem->pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
+			       headroom, size, unaligned_chunks);
+	if (!umem->pool) {
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+	return 0;

+out_unmap:
+	xdp_umem_unmap_pages(umem);
+out_pages:
+	kvfree(umem->pages);
 out_pin:
 	xdp_umem_unpin_pages(umem);
 out_account:
@@ -117,76 +117,67 @@ bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
 }
 EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

-/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
- * each page. This is only required in copy mode.
- */
-static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
-			     u32 len, u32 metalen)
-{
-	void *to_buf = xdp_umem_get_data(umem, addr);
-
-	addr = xsk_umem_add_offset_to_addr(addr);
-	if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
-		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
-		u64 page_start = addr & ~(PAGE_SIZE - 1);
-		u64 first_len = PAGE_SIZE - (addr - page_start);
-
-		memcpy(to_buf, from_buf, first_len);
-		memcpy(next_pg_addr, from_buf + first_len,
-		       len + metalen - first_len);
-
-		return;
-	}
-
-	memcpy(to_buf, from_buf, len + metalen);
-}
-
-static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-	u64 offset = xs->umem->headroom;
-	u64 addr, memcpy_addr;
-	void *from_buf;
-	u32 metalen;
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+	u64 addr;
 	int err;

-	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
-	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
+	addr = xp_get_handle(xskb);
+	err = xskq_prod_reserve_desc(xs->rx, addr, len);
+	if (err) {
 		xs->rx_dropped++;
-		return -ENOSPC;
+		return err;
 	}

-	if (unlikely(xdp_data_meta_unsupported(xdp))) {
-		from_buf = xdp->data;
+	xp_release(xskb);
+	return 0;
+}
+
+static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
+{
+	void *from_buf, *to_buf;
+	u32 metalen;
+
+	if (unlikely(xdp_data_meta_unsupported(from))) {
+		from_buf = from->data;
+		to_buf = to->data;
 		metalen = 0;
 	} else {
-		from_buf = xdp->data_meta;
-		metalen = xdp->data - xdp->data_meta;
+		from_buf = from->data_meta;
+		metalen = from->data - from->data_meta;
+		to_buf = to->data - metalen;
 	}

-	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
-	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);
-
-	offset += metalen;
-	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
-	err = xskq_prod_reserve_desc(xs->rx, addr, len);
-	if (!err) {
-		xskq_cons_release(xs->umem->fq);
-		xdp_return_buff(xdp);
-		return 0;
-	}
-
-	xs->rx_dropped++;
-	return err;
+	memcpy(to_buf, from_buf, len + metalen);
 }

-static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
+		     bool explicit_free)
 {
-	int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);
+	struct xdp_buff *xsk_xdp;
+	int err;

-	if (err)
+	if (len > xsk_umem_get_rx_frame_size(xs->umem)) {
 		xs->rx_dropped++;
+		return -ENOSPC;
+	}

-	return err;
+	xsk_xdp = xsk_buff_alloc(xs->umem);
+	if (!xsk_xdp) {
+		xs->rx_dropped++;
+		return -ENOSPC;
+	}
+
+	xsk_copy_xdp(xsk_xdp, xdp, len);
+	err = __xsk_rcv_zc(xs, xsk_xdp, len);
+	if (err) {
+		xsk_buff_free(xsk_xdp);
+		return err;
+	}
+	if (explicit_free)
+		xdp_return_buff(xdp);
+	return 0;
 }

 static bool xsk_is_bound(struct xdp_sock *xs)
@@ -199,7 +190,8 @@ static bool xsk_is_bound(struct xdp_sock *xs)
 	return false;
 }

-static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
+		   bool explicit_free)
 {
 	u32 len;

@@ -211,8 +203,10 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	len = xdp->data_end - xdp->data;

-	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
-		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
+	return xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY ||
+		xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
+		__xsk_rcv_zc(xs, xdp, len) :
+		__xsk_rcv(xs, xdp, len, explicit_free);
 }

 static void xsk_flush(struct xdp_sock *xs)
@@ -224,46 +218,11 @@ static void xsk_flush(struct xdp_sock *xs)

 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	u32 metalen = xdp->data - xdp->data_meta;
-	u32 len = xdp->data_end - xdp->data;
-	u64 offset = xs->umem->headroom;
-	void *buffer;
-	u64 addr;
 	int err;

 	spin_lock_bh(&xs->rx_lock);
-
-	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
-		err = -EINVAL;
-		goto out_unlock;
-	}
-
-	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
-	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
-		err = -ENOSPC;
-		goto out_drop;
-	}
-
-	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
-	buffer = xdp_umem_get_data(xs->umem, addr);
-	memcpy(buffer, xdp->data_meta, len + metalen);
-
-	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
-	err = xskq_prod_reserve_desc(xs->rx, addr, len);
-	if (err)
-		goto out_drop;
-
-	xskq_cons_release(xs->umem->fq);
-	xskq_prod_submit(xs->rx);
-	spin_unlock_bh(&xs->rx_lock);
-
-	xs->sk.sk_data_ready(&xs->sk);
-	return 0;
-
-out_drop:
-	xs->rx_dropped++;
-out_unlock:
+	err = xsk_rcv(xs, xdp, false);
+	xsk_flush(xs);
 	spin_unlock_bh(&xs->rx_lock);
 	return err;
 }
@@ -273,7 +232,7 @@ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	int err;

-	err = xsk_rcv(xs, xdp);
+	err = xsk_rcv(xs, xdp, true);
 	if (err)
 		return err;
@@ -404,7 +363,7 @@ static int xsk_generic_xmit(struct sock *sk)
 		skb_put(skb, len);
 		addr = desc.addr;
-		buffer = xdp_umem_get_data(xs->umem, addr);
+		buffer = xsk_buff_raw_get_data(xs->umem, addr);
 		err = skb_store_bits(skb, 0, buffer, len);
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
@@ -860,6 +819,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
 			&xs->umem->cq;
 		err = xsk_init_queue(entries, q, true);
+		if (optname == XDP_UMEM_FILL_RING)
+			xp_set_fq(xs->umem->pool, *q);
 		mutex_unlock(&xs->mutex);
 		return err;
 	}
[One file's diff is collapsed in this view and not shown here.]
@@ -56,7 +56,7 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
 	du.id = umem->id;
 	du.size = umem->size;
 	du.num_pages = umem->npgs;
-	du.chunk_size = umem->chunk_size_nohr + umem->headroom;
+	du.chunk_size = umem->chunk_size;
 	du.headroom = umem->headroom;
 	du.ifindex = umem->dev ? umem->dev->ifindex : 0;
 	du.queue_id = umem->queue_id;
@@ -9,6 +9,7 @@
 #include <linux/types.h>
 #include <linux/if_xdp.h>
 #include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>

 #include "xsk.h"
@@ -172,31 +173,45 @@ static inline bool xskq_cons_read_addr(struct xsk_queue *q, u64 *addr,
 	return false;
 }

+static inline bool xskq_cons_read_addr_aligned(struct xsk_queue *q, u64 *addr)
+{
+	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+	while (q->cached_cons != q->cached_prod) {
+		u32 idx = q->cached_cons & q->ring_mask;
+
+		*addr = ring->desc[idx];
+		if (xskq_cons_is_valid_addr(q, *addr))
+			return true;
+
+		q->cached_cons++;
+	}
+
+	return false;
+}
+
+static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+	if (q->cached_cons != q->cached_prod) {
+		u32 idx = q->cached_cons & q->ring_mask;
+
+		*addr = ring->desc[idx];
+		return true;
+	}
+
+	return false;
+}
+
 static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
 					   struct xdp_desc *d,
 					   struct xdp_umem *umem)
 {
-	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
-		if (!xskq_cons_is_valid_unaligned(q, d->addr, d->len, umem))
-			return false;
-
-		if (d->len > umem->chunk_size_nohr || d->options) {
-			q->invalid_descs++;
-			return false;
-		}
-
-		return true;
-	}
-
-	if (!xskq_cons_is_valid_addr(q, d->addr))
-		return false;
-
-	if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
-	    d->options) {
+	if (!xp_validate_desc(umem->pool, d)) {
 		q->invalid_descs++;
 		return false;
 	}

 	return true;
 }
@@ -260,6 +275,20 @@ static inline bool xskq_cons_peek_addr(struct xsk_queue *q, u64 *addr,
 	return xskq_cons_read_addr(q, addr, umem);
 }

+static inline bool xskq_cons_peek_addr_aligned(struct xsk_queue *q, u64 *addr)
+{
+	if (q->cached_prod == q->cached_cons)
+		xskq_cons_get_entries(q);
+	return xskq_cons_read_addr_aligned(q, addr);
+}
+
+static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+	if (q->cached_prod == q->cached_cons)
+		xskq_cons_get_entries(q);
+	return xskq_cons_read_addr_unchecked(q, addr);
+}
+
 static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
 				       struct xdp_desc *desc,
 				       struct xdp_umem *umem)