Commit b9b6b68e authored by Björn Töpel, committed by Alexei Starovoitov

xsk: add Rx queue setup and mmap support

Another setsockopt (XDP_RX_RING) is added to let the process allocate
a queue, where the kernel can pass completed Rx frames to the user
process.
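
A minimal user-space sketch of the new option (SOL_XDP and the AF_XDP
socket creation are assumed from the rest of the patch set; the helper
name is illustrative):

	#include <sys/socket.h>
	#include <linux/if_xdp.h>

	/* Hypothetical helper: size the Rx ring on an AF_XDP socket 'fd'.
	 * xsk_init_queue() rejects anything but a non-zero power of two.
	 */
	static int xsk_setup_rx_ring(int fd)
	{
		int entries = 1024;

		return setsockopt(fd, SOL_XDP, XDP_RX_RING,
				  &entries, sizeof(entries));
	}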

The mmapping of the queue is done using the XDP_PGOFF_RX_RING offset.
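
A matching mmap() sketch (the length mirrors xskq_rxtx_get_ring_size()
below; struct xdp_ring and struct xdp_desc come from the if_xdp.h hunk;
the helper name is illustrative):

	#include <sys/mman.h>
	#include <linux/if_xdp.h>

	/* Hypothetical helper: map the Rx ring into the process. */
	static void *xsk_map_rx_ring(int fd, unsigned int entries)
	{
		size_t len = sizeof(struct xdp_ring) +
			     entries * sizeof(struct xdp_desc);

		return mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, fd,
			    XDP_PGOFF_RX_RING);
	}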
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 423f3832
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -18,11 +18,15 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
+struct net_device;
+struct xsk_queue;
 struct xdp_umem;
 
 struct xdp_sock {
 	/* struct sock must be the first member of struct xdp_sock */
 	struct sock sk;
+	struct xsk_queue *rx;
+	struct net_device *dev;
 	struct xdp_umem *umem;
 	/* Protects multiple processes in the control path */
 	struct mutex mutex;
...
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 
 /* XDP socket options */
+#define XDP_RX_RING			1
 #define XDP_UMEM_REG			3
 #define XDP_UMEM_FILL_RING		4
 
@@ -33,13 +34,28 @@ struct xdp_umem_reg {
 };
 
 /* Pgoff for mmaping the rings */
+#define XDP_PGOFF_RX_RING		0
 #define XDP_UMEM_PGOFF_FILL_RING	0x100000000
 
+struct xdp_desc {
+	__u32 idx;
+	__u32 len;
+	__u16 offset;
+	__u8 flags;
+	__u8 padding[5];
+};
+
 struct xdp_ring {
 	__u32 producer __attribute__((aligned(64)));
 	__u32 consumer __attribute__((aligned(64)));
 };
 
+/* Used for the RX and TX queues for packets */
+struct xdp_rxtx_ring {
+	struct xdp_ring ptrs;
+	struct xdp_desc desc[0] __attribute__((aligned(64)));
+};
+
 /* Used for the fill and completion queues for buffers */
 struct xdp_umem_ring {
 	struct xdp_ring ptrs;
...
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -31,6 +31,7 @@
 #include <linux/net.h>
 #include <linux/netdevice.h>
 #include <net/xdp_sock.h>
+#include <net/xdp.h>
 
 #include "xsk_queue.h"
 #include "xdp_umem.h"
@@ -40,14 +41,15 @@ static struct xdp_sock *xdp_sk(struct sock *sk)
 	return (struct xdp_sock *)sk;
 }
 
-static int xsk_init_queue(u32 entries, struct xsk_queue **queue)
+static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
+			  bool umem_queue)
 {
 	struct xsk_queue *q;
 
 	if (entries == 0 || *queue || !is_power_of_2(entries))
 		return -EINVAL;
 
-	q = xskq_create(entries);
+	q = xskq_create(entries, umem_queue);
 	if (!q)
 		return -ENOMEM;
 
@@ -89,6 +91,22 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 		return -ENOPROTOOPT;
 
 	switch (optname) {
+	case XDP_RX_RING:
+	{
+		struct xsk_queue **q;
+		int entries;
+
+		if (optlen < sizeof(entries))
+			return -EINVAL;
+		if (copy_from_user(&entries, optval, sizeof(entries)))
+			return -EFAULT;
+
+		mutex_lock(&xs->mutex);
+		q = &xs->rx;
+		err = xsk_init_queue(entries, q, false);
+		mutex_unlock(&xs->mutex);
+		return err;
+	}
 	case XDP_UMEM_REG:
 	{
 		struct xdp_umem_reg mr;
@@ -130,7 +148,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 
 		mutex_lock(&xs->mutex);
 		q = &xs->umem->fq;
-		err = xsk_init_queue(entries, q);
+		err = xsk_init_queue(entries, q, true);
 		mutex_unlock(&xs->mutex);
 		return err;
 	}
@@ -151,6 +169,9 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	unsigned long pfn;
 	struct page *qpg;
 
+	if (offset == XDP_PGOFF_RX_RING) {
+		q = xs->rx;
+	} else {
 	if (!xs->umem)
 		return -EINVAL;
 
@@ -158,6 +179,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 		q = xs->umem->fq;
 	else
 		return -EINVAL;
+	}
 
 	if (!q)
 		return -EINVAL;
@@ -205,6 +227,7 @@ static void xsk_destruct(struct sock *sk)
 	if (!sock_flag(sk, SOCK_DEAD))
 		return;
 
+	xskq_destroy(xs->rx);
 	xdp_put_umem(xs->umem);
 
 	sk_refcnt_debug_dec(sk);
...
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -21,7 +21,13 @@ static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
 	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u32);
 }
 
-struct xsk_queue *xskq_create(u32 nentries)
+static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
+{
+	return (sizeof(struct xdp_ring) +
+		q->nentries * sizeof(struct xdp_desc));
+}
+
+struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
 {
 	struct xsk_queue *q;
 	gfp_t gfp_flags;
@@ -36,7 +42,8 @@ struct xsk_queue *xskq_create(u32 nentries)
 	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
 		    __GFP_COMP | __GFP_NORETRY;
 
-	size = xskq_umem_get_ring_size(q);
+	size = umem_queue ? xskq_umem_get_ring_size(q) :
+	       xskq_rxtx_get_ring_size(q);
 
 	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
 						      get_order(size));
...
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -32,7 +32,7 @@ struct xsk_queue {
 	u64 invalid_descs;
 };
 
-struct xsk_queue *xskq_create(u32 nentries);
+struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q);
 
 #endif /* _LINUX_XSK_QUEUE_H */
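
For context, a hedged sketch of the consumer side of the mapped ring;
the helper name and the barrier placement are illustrative only, not
part of this patch:

	/* Pop one completed Rx descriptor; returns 1 on success, 0 if the
	 * ring is empty. 'nentries' must be the power-of-two size passed
	 * to XDP_RX_RING, so masking implements the ring wrap-around.
	 */
	static int xsk_rx_ring_pop(struct xdp_rxtx_ring *ring,
				   unsigned int nentries,
				   struct xdp_desc *desc)
	{
		unsigned int cons = ring->ptrs.consumer;

		if (cons == ring->ptrs.producer)
			return 0;	/* no completed frames */

		__sync_synchronize();	/* see the descriptor the kernel wrote */
		*desc = ring->desc[cons & (nentries - 1)];
		__sync_synchronize();	/* finish the read before releasing the slot */
		ring->ptrs.consumer = cons + 1;
		return 1;
	}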