Commit 4e3264d2 authored by Martin KaFai Lau, committed by David S. Miller

bpf: Fix bpf_redirect to an ipip/ip6tnl dev

If the bpf program calls bpf_redirect(dev, 0) and dev is
an ipip/ip6tnl device, the redirected skb currently still
includes the mac header.  e.g. if dev is ipip, the end result
is IP-EthHdr-IP instead of IP-IP.
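
For illustration only (not part of this patch), a libbpf-style tc
classifier that hits this path could look like the sketch below;
TUN_IFINDEX is a placeholder for the ipip/ip6tnl device's ifindex:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TUN_IFINDEX 42	/* hypothetical ifindex of the ipip netdev */

SEC("classifier")
int redirect_to_tunnel(struct __sk_buff *skb)
{
	/* Redirect the packet to the tunnel device.  Before this fix,
	 * the Ethernet header was still attached, so an ipip dev ended
	 * up emitting IP-EthHdr-IP instead of IP-IP.
	 */
	return bpf_redirect(TUN_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";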

The fix is to pull the mac header before redirecting.  At ingress,
skb_postpull_rcsum() is not needed because the ethhdr has already
been pulled once and was only pushed back just before calling the
bpf prog.  At egress, this patch calls skb_postpull_rcsum().
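
To illustrate why the rcsum has to be adjusted once the mac header is
gone, here is a small standalone model of the one's-complement
arithmetic (a userspace sketch, not kernel code): subtracting the
pulled mac-header bytes from a CHECKSUM_COMPLETE-style sum leaves
exactly the sum over the remaining bytes, i.e. the rcsum then starts
at the net header.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum over a buffer (len assumed even for brevity). */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	return csum_fold(sum);
}

/* Model of the post-pull fixup: remove the pulled bytes from the sum. */
static uint16_t csum_sub16(uint16_t whole, uint16_t pulled)
{
	return csum_fold((uint32_t)whole + (uint16_t)~pulled);
}

int main(void)
{
	uint8_t pkt[32];
	size_t i, mlen = 14;	/* Ethernet header length */

	for (i = 0; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)i;

	uint16_t whole    = csum16(pkt, sizeof(pkt));		/* mac hdr .. end */
	uint16_t adjusted = csum_sub16(whole, csum16(pkt, mlen));
	uint16_t direct   = csum16(pkt + mlen, sizeof(pkt) - mlen); /* net hdr .. end */

	/* Prints the same value twice: the adjusted sum equals a sum
	 * taken directly from the network header onwards.
	 */
	printf("adjusted=0x%04x direct=0x%04x\n", adjusted, direct);
	return adjusted != direct;
}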

If bpf_redirect(dev, BPF_F_INGRESS) is called, it also fails
today because it goes through dev_forward_skb(), which
eventually calls eth_type_trans(skb, dev).  eth_type_trans()
sets skb->pkt_type to PACKET_OTHERHOST because the destination
mac address does not match the redirecting dev->dev_addr, and
PACKET_OTHERHOST eventually causes ip_rcv() to drop the packet.
To fix this, ____dev_forward_skb() is added.
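
As background (a simplified userspace model, not kernel code), the
failure chain described above boils down to the two checks modeled
below: eth_type_trans() marks the frame as being for another host,
and ip_rcv() drops such packets before doing any IP processing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Values mirror PACKET_HOST / PACKET_OTHERHOST from linux/if_packet.h. */
enum { PKT_HOST = 0, PKT_OTHERHOST = 3 };

/* Model of the relevant part of eth_type_trans(): a unicast frame whose
 * destination mac differs from the receiving device's dev_addr is
 * marked as destined for another host.
 */
static int model_eth_type_trans(const uint8_t dst[6], const uint8_t dev_addr[6])
{
	return memcmp(dst, dev_addr, 6) ? PKT_OTHERHOST : PKT_HOST;
}

/* Model of the early check in ip_rcv(): packets marked for another
 * host are dropped before any IP processing happens.
 */
static int model_ip_rcv(int pkt_type)
{
	return pkt_type == PKT_OTHERHOST ? -1 /* drop */ : 0 /* deliver */;
}

int main(void)
{
	const uint8_t tunnel_dev_addr[6] = { 0 };	/* ipip/ip6tnl devs have no usable mac */
	const uint8_t frame_dst[6] = { 0x02, 0, 0, 0, 0, 0x01 };	/* placeholder */

	int pkt_type = model_eth_type_trans(frame_dst, tunnel_dev_addr);

	/* Prints "dropped": this is why the old dev_forward_skb() path failed. */
	printf("%s\n", model_ip_rcv(pkt_type) ? "dropped" : "delivered");
	return 0;
}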

Joint work with Daniel Borkmann.

Fixes: cfc7381b ("ip_tunnel: add collect_md mode to IPIP tunnel")
Fixes: 8d79266b ("ip6_tunnel: add collect_md mode to IPv6 tunnels")
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 23dd8315
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 bool is_skb_forwardable(const struct net_device *dev,
                         const struct sk_buff *skb);
 
+static __always_inline int ____dev_forward_skb(struct net_device *dev,
+                                               struct sk_buff *skb)
+{
+        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+            unlikely(!is_skb_forwardable(dev, skb))) {
+                atomic_long_inc(&dev->rx_dropped);
+                kfree_skb(skb);
+                return NET_RX_DROP;
+        }
+
+        skb_scrub_packet(skb, true);
+        skb->priority = 0;
+        return 0;
+}
+
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
 extern int netdev_budget;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
-            unlikely(!is_skb_forwardable(dev, skb))) {
-                atomic_long_inc(&dev->rx_dropped);
-                kfree_skb(skb);
-                return NET_RX_DROP;
-        }
+        int ret = ____dev_forward_skb(dev, skb);
 
-        skb_scrub_packet(skb, true);
-        skb->priority = 0;
-        skb->protocol = eth_type_trans(skb, dev);
-        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+        if (likely(!ret)) {
+                skb->protocol = eth_type_trans(skb, dev);
+                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+        }
 
-        return 0;
+        return ret;
 }
 EXPORT_SYMBOL_GPL(__dev_forward_skb);
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 	return dev_forward_skb(dev, skb);
 }
 
+static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
+                                      struct sk_buff *skb)
+{
+        int ret = ____dev_forward_skb(dev, skb);
+
+        if (likely(!ret)) {
+                skb->dev = dev;
+                ret = netif_rx(skb);
+        }
+
+        return ret;
+}
+
 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int ret;
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 	return ret;
 }
 
+static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+                                 u32 flags)
+{
+        /* skb->mac_len is not set on normal egress */
+        unsigned int mlen = skb->network_header - skb->mac_header;
+
+        __skb_pull(skb, mlen);
+
+        /* At ingress, the mac header has already been pulled once.
+         * At egress, skb_postpull_rcsum has to be done in case that
+         * the skb is originated from ingress (i.e. a forwarded skb)
+         * to ensure that rcsum starts at net header.
+         */
+        if (!skb_at_tc_ingress(skb))
+                skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+        skb_pop_mac_header(skb);
+        skb_reset_mac_len(skb);
+        return flags & BPF_F_INGRESS ?
+               __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+                                 u32 flags)
+{
+        bpf_push_mac_rcsum(skb);
+        return flags & BPF_F_INGRESS ?
+               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
+                          u32 flags)
+{
+        switch (dev->type) {
+        case ARPHRD_TUNNEL:
+        case ARPHRD_TUNNEL6:
+        case ARPHRD_SIT:
+        case ARPHRD_IPGRE:
+        case ARPHRD_VOID:
+        case ARPHRD_NONE:
+                return __bpf_redirect_no_mac(skb, dev, flags);
+        default:
+                return __bpf_redirect_common(skb, dev, flags);
+        }
+}
+
 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 {
 	struct net_device *dev;
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 		return -ENOMEM;
 	}
 
-        bpf_push_mac_rcsum(clone);
-
-        return flags & BPF_F_INGRESS ?
-               __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
+        return __bpf_redirect(clone, dev, flags);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-        bpf_push_mac_rcsum(skb);
-
-        return ri->flags & BPF_F_INGRESS ?
-               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+        return __bpf_redirect(skb, dev, ri->flags);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {
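
As a usage sketch (not part of this patch; ifindex and addresses are
placeholders), a tc/BPF program redirecting into a collect_md ipip
device (the setup the Fixes tags refer to) typically sets the tunnel
key first and then redirects:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

#define IPIP_IFINDEX 42		/* hypothetical ifindex of the collect_md ipip dev */

SEC("classifier")
int redirect_into_ipip(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	/* Tell the collect_md tunnel where to send the encapsulated packet. */
	key.remote_ipv4 = 0x0a000002;	/* 10.0.0.2, placeholder */
	key.tunnel_ttl = 64;
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;

	/* With this fix the mac header is pulled on redirect, so the
	 * tunnel emits IP-IP rather than IP-EthHdr-IP.
	 */
	return bpf_redirect(IPIP_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";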