Commit fe21cb91 authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

net: core: Split out code to run generic XDP prog

This helper can later be utilized in code that runs cpumap and devmap
programs in generic redirect mode, adjusting the skb based on changes
made to the xdp_buff.

When the program returns XDP_REDIRECT/XDP_TX, the helper invokes
__skb_push, so whenever a generic redirect path invokes a devmap/cpumap
prog (if set), it must __skb_pull again, as the rest of the stack
expects the mac header to be pulled.

It also drops the skb_reset_mac_len call after do_xdp_generic, as the
mac_header and network_header are advanced by the same offset, so the
difference (mac_len) remains constant.
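To see why, here is a standalone illustration of the arithmetic (plain
userspace C with made-up offsets, not kernel code):

	#include <assert.h>

	int main(void)
	{
		int mac_header = 64, network_header = 78;	/* 14 bytes apart */
		int mac_len = network_header - mac_header;
		int off = -16;	/* e.g. bpf_xdp_adjust_head() moved the start */

		/* both header offsets are advanced by the same 'off' */
		mac_header += off;
		network_header += off;

		assert(network_header - mac_header == mac_len);	/* still 14 */
		return 0;
	}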
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20210702111825.491065-2-memxor@gmail.com
parent a080cdcc
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3984,6 +3984,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 }
 
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+			     struct bpf_prog *xdp_prog);
 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
 int netif_rx(struct sk_buff *skb);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4744,45 +4744,18 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
 	return rxqueue;
 }
 
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
-				     struct xdp_buff *xdp,
-				     struct bpf_prog *xdp_prog)
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+			     struct bpf_prog *xdp_prog)
 {
 	void *orig_data, *orig_data_end, *hard_start;
 	struct netdev_rx_queue *rxqueue;
-	u32 metalen, act = XDP_DROP;
 	bool orig_bcast, orig_host;
 	u32 mac_len, frame_sz;
 	__be16 orig_eth_type;
 	struct ethhdr *eth;
+	u32 metalen, act;
 	int off;
 
-	/* Reinjected packets coming from act_mirred or similar should
-	 * not get XDP generic processing.
-	 */
-	if (skb_is_redirected(skb))
-		return XDP_PASS;
-
-	/* XDP packets must be linear and must have sufficient headroom
-	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
-	 * native XDP provides, thus we need to do it here as well.
-	 */
-	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
-	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
-		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
-		int troom = skb->tail + skb->data_len - skb->end;
-
-		/* In case we have to go down the path and also linearize,
-		 * then lets do the pskb_expand_head() work just once here.
-		 */
-		if (pskb_expand_head(skb,
-				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
-				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
-			goto do_drop;
-		if (skb_linearize(skb))
-			goto do_drop;
-	}
-
 	/* The XDP program wants to see the packet starting at the MAC
 	 * header.
 	 */
@@ -4837,6 +4810,13 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 		skb->protocol = eth_type_trans(skb, skb->dev);
 	}
 
+	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
+	 * before calling us again on redirect path. We do not call do_redirect
+	 * as we leave that up to the caller.
+	 *
+	 * Caller is responsible for managing lifetime of skb (i.e. calling
+	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
+	 */
 	switch (act) {
 	case XDP_REDIRECT:
 	case XDP_TX:
@@ -4847,6 +4827,49 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 		if (metalen)
 			skb_metadata_set(skb, metalen);
 		break;
+	}
+
+	return act;
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+				     struct xdp_buff *xdp,
+				     struct bpf_prog *xdp_prog)
+{
+	u32 act = XDP_DROP;
+
+	/* Reinjected packets coming from act_mirred or similar should
+	 * not get XDP generic processing.
+	 */
+	if (skb_is_redirected(skb))
+		return XDP_PASS;
+
+	/* XDP packets must be linear and must have sufficient headroom
+	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
+	 * native XDP provides, thus we need to do it here as well.
+	 */
+	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
+	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+		int troom = skb->tail + skb->data_len - skb->end;
+
+		/* In case we have to go down the path and also linearize,
+		 * then lets do the pskb_expand_head() work just once here.
+		 */
+		if (pskb_expand_head(skb,
+				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
+			goto do_drop;
+		if (skb_linearize(skb))
+			goto do_drop;
+	}
+
+	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+	switch (act) {
+	case XDP_REDIRECT:
+	case XDP_TX:
+	case XDP_PASS:
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
@@ -5312,7 +5335,6 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 			ret = NET_RX_DROP;
 			goto out;
 		}
-		skb_reset_mac_len(skb);
 	}
 
 	if (eth_type_vlan(skb->protocol)) {