Commit 02671e23 authored by Björn Töpel, committed by Alexei Starovoitov

xsk: wire up XDP_SKB side of AF_XDP

This commit wires up the xskmap to XDP_SKB layer.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 1b1a251c
...@@ -760,7 +760,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, ...@@ -760,7 +760,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* This does not appear to be a real limitation for existing software. * This does not appear to be a real limitation for existing software.
*/ */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *prog); struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev, int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp, struct xdp_buff *xdp,
struct bpf_prog *prog); struct bpf_prog *prog);
......
...@@ -3994,12 +3994,12 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) ...@@ -3994,12 +3994,12 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
} }
static u32 netif_receive_generic_xdp(struct sk_buff *skb, static u32 netif_receive_generic_xdp(struct sk_buff *skb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog) struct bpf_prog *xdp_prog)
{ {
struct netdev_rx_queue *rxqueue; struct netdev_rx_queue *rxqueue;
void *orig_data, *orig_data_end; void *orig_data, *orig_data_end;
u32 metalen, act = XDP_DROP; u32 metalen, act = XDP_DROP;
struct xdp_buff xdp;
int hlen, off; int hlen, off;
u32 mac_len; u32 mac_len;
...@@ -4034,19 +4034,19 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, ...@@ -4034,19 +4034,19 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
*/ */
mac_len = skb->data - skb_mac_header(skb); mac_len = skb->data - skb_mac_header(skb);
hlen = skb_headlen(skb) + mac_len; hlen = skb_headlen(skb) + mac_len;
xdp.data = skb->data - mac_len; xdp->data = skb->data - mac_len;
xdp.data_meta = xdp.data; xdp->data_meta = xdp->data;
xdp.data_end = xdp.data + hlen; xdp->data_end = xdp->data + hlen;
xdp.data_hard_start = skb->data - skb_headroom(skb); xdp->data_hard_start = skb->data - skb_headroom(skb);
orig_data_end = xdp.data_end; orig_data_end = xdp->data_end;
orig_data = xdp.data; orig_data = xdp->data;
rxqueue = netif_get_rxqueue(skb); rxqueue = netif_get_rxqueue(skb);
xdp.rxq = &rxqueue->xdp_rxq; xdp->rxq = &rxqueue->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp); act = bpf_prog_run_xdp(xdp_prog, xdp);
off = xdp.data - orig_data; off = xdp->data - orig_data;
if (off > 0) if (off > 0)
__skb_pull(skb, off); __skb_pull(skb, off);
else if (off < 0) else if (off < 0)
...@@ -4056,10 +4056,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, ...@@ -4056,10 +4056,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
/* check if bpf_xdp_adjust_tail was used. it can only "shrink" /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
* pckt. * pckt.
*/ */
off = orig_data_end - xdp.data_end; off = orig_data_end - xdp->data_end;
if (off != 0) { if (off != 0) {
skb_set_tail_pointer(skb, xdp.data_end - xdp.data); skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
skb->len -= off; skb->len -= off;
} }
switch (act) { switch (act) {
...@@ -4068,7 +4069,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, ...@@ -4068,7 +4069,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
__skb_push(skb, mac_len); __skb_push(skb, mac_len);
break; break;
case XDP_PASS: case XDP_PASS:
metalen = xdp.data - xdp.data_meta; metalen = xdp->data - xdp->data_meta;
if (metalen) if (metalen)
skb_metadata_set(skb, metalen); skb_metadata_set(skb, metalen);
break; break;
...@@ -4118,17 +4119,19 @@ static struct static_key generic_xdp_needed __read_mostly; ...@@ -4118,17 +4119,19 @@ static struct static_key generic_xdp_needed __read_mostly;
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{ {
if (xdp_prog) { if (xdp_prog) {
u32 act = netif_receive_generic_xdp(skb, xdp_prog); struct xdp_buff xdp;
u32 act;
int err; int err;
act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
if (act != XDP_PASS) { if (act != XDP_PASS) {
switch (act) { switch (act) {
case XDP_REDIRECT: case XDP_REDIRECT:
err = xdp_do_generic_redirect(skb->dev, skb, err = xdp_do_generic_redirect(skb->dev, skb,
xdp_prog); &xdp, xdp_prog);
if (err) if (err)
goto out_redir; goto out_redir;
/* fallthru to submit skb */ break;
case XDP_TX: case XDP_TX:
generic_xdp_tx(skb, xdp_prog); generic_xdp_tx(skb, xdp_prog);
break; break;
......
...@@ -59,6 +59,7 @@ ...@@ -59,6 +59,7 @@
#include <net/tcp.h> #include <net/tcp.h>
#include <net/xfrm.h> #include <net/xfrm.h>
#include <linux/bpf_trace.h> #include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
/** /**
* sk_filter_trim_cap - run a packet through a socket filter * sk_filter_trim_cap - run a packet through a socket filter
...@@ -2973,13 +2974,14 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd) ...@@ -2973,13 +2974,14 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
static int xdp_do_generic_redirect_map(struct net_device *dev, static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb, struct sk_buff *skb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog) struct bpf_prog *xdp_prog)
{ {
struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct redirect_info *ri = this_cpu_ptr(&redirect_info);
unsigned long map_owner = ri->map_owner; unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map; struct bpf_map *map = ri->map;
struct net_device *fwd = NULL;
u32 index = ri->ifindex; u32 index = ri->ifindex;
void *fwd = NULL;
int err = 0; int err = 0;
ri->ifindex = 0; ri->ifindex = 0;
...@@ -3001,6 +3003,14 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, ...@@ -3001,6 +3003,14 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd)))) if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
goto err; goto err;
skb->dev = fwd; skb->dev = fwd;
generic_xdp_tx(skb, xdp_prog);
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
struct xdp_sock *xs = fwd;
err = xsk_generic_rcv(xs, xdp);
if (err)
goto err;
consume_skb(skb);
} else { } else {
/* TODO: Handle BPF_MAP_TYPE_CPUMAP */ /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
err = -EBADRQC; err = -EBADRQC;
...@@ -3015,7 +3025,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, ...@@ -3015,7 +3025,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
} }
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog) struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{ {
struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct redirect_info *ri = this_cpu_ptr(&redirect_info);
u32 index = ri->ifindex; u32 index = ri->ifindex;
...@@ -3023,7 +3033,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, ...@@ -3023,7 +3033,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int err = 0; int err = 0;
if (ri->map) if (ri->map)
return xdp_do_generic_redirect_map(dev, skb, xdp_prog); return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
ri->ifindex = 0; ri->ifindex = 0;
fwd = dev_get_by_index_rcu(dev_net(dev), index); fwd = dev_get_by_index_rcu(dev_net(dev), index);
...@@ -3037,6 +3047,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, ...@@ -3037,6 +3047,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
skb->dev = fwd; skb->dev = fwd;
_trace_xdp_redirect(dev, xdp_prog, index); _trace_xdp_redirect(dev, xdp_prog, index);
generic_xdp_tx(skb, xdp_prog);
return 0; return 0;
err: err:
_trace_xdp_redirect_err(dev, xdp_prog, index, err); _trace_xdp_redirect_err(dev, xdp_prog, index, err);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment