Commit d24ad3fc authored by David S. Miller

Merge branch 'bpf-next'

Daniel Borkmann says:

====================
BPF updates

A couple of miscellaneous BPF updates. Among other things, this
series allows bpf_csum_diff() to be used with L3 checksums, adds
management of tunnel options in collect-metadata mode, and enables
the IPv6 traffic class for collect metadata in VXLAN specifically
(Geneve already supports it). For more details, please see the
individual patches.

The series requires net to be merged into net-next first to
avoid any further pending merge conflicts.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 810813c4 1400615d
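
Before the diff itself, a minimal tc classifier sketch of how the new option setters are meant to be driven, assuming clang's BPF target and 2016-era hand-rolled helper stubs; the section name, IP address, and GBP value are made up for illustration:

#include <stdint.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>

/* Stubs wired to the helper IDs from enum bpf_func_id below. */
static int (*bpf_skb_set_tunnel_key)(void *ctx, const void *key,
                                     int size, int flags) =
        (void *) BPF_FUNC_skb_set_tunnel_key;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, const void *opt,
                                     int size) =
        (void *) BPF_FUNC_skb_set_tunnel_opt;

struct vxlan_metadata {
        uint32_t gbp;                   /* VXLAN-GBP policy id */
};

__attribute__((section("vxlan_set_tunnel"), used))
int set_tunnel(struct __sk_buff *skb)
{
        struct bpf_tunnel_key key = {};
        struct vxlan_metadata md;

        key.remote_ipv4 = 0xac100164;   /* 172.16.1.100, illustrative */
        key.tunnel_id = 2;
        key.tunnel_ttl = 64;

        if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_ZERO_CSUM_TX) < 0)
                return TC_ACT_SHOT;

        md.gbp = 0x800FF;               /* illustrative policy id */
        /* Option size must be a multiple of 4 and at most 255
         * (BPF_TUNLEN_MAX), see bpf_skb_set_tunnel_opt() below.
         */
        if (bpf_skb_set_tunnel_opt(skb, &md, sizeof(md)) < 0)
                return TC_ACT_SHOT;

        return TC_ACT_OK;
}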
@@ -775,10 +775,10 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
                                        struct flowi4 *fl4,
                                        struct ip_tunnel_info *info)
 {
+        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
         struct geneve_dev *geneve = netdev_priv(dev);
         struct dst_cache *dst_cache;
         struct rtable *rt = NULL;
-        bool use_cache = true;
         __u8 tos;
 
         memset(fl4, 0, sizeof(*fl4));
@@ -804,7 +804,6 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
                 dst_cache = &geneve->dst_cache;
         }
 
-        use_cache = use_cache && !skb->mark;
         if (use_cache) {
                 rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
                 if (rt)
@@ -832,11 +831,11 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
                                            struct flowi6 *fl6,
                                            struct ip_tunnel_info *info)
 {
+        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
         struct geneve_dev *geneve = netdev_priv(dev);
         struct geneve_sock *gs6 = geneve->sock6;
         struct dst_entry *dst = NULL;
         struct dst_cache *dst_cache;
-        bool use_cache = true;
         __u8 prio;
 
         memset(fl6, 0, sizeof(*fl6));
@@ -862,7 +861,6 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
                 dst_cache = &geneve->dst_cache;
         }
 
-        use_cache = use_cache && !skb->mark;
         if (use_cache) {
                 dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
                 if (dst)
@@ -940,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                 u8 vni[3];
 
                 tunnel_id_to_vni(key->tun_id, vni);
-                if (key->tun_flags & TUNNEL_GENEVE_OPT)
+                if (info->options_len)
                         opts = ip_tunnel_info_opts(info);
 
                 if (key->tun_flags & TUNNEL_CSUM)
@@ -1027,7 +1025,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                 u8 vni[3];
 
                 tunnel_id_to_vni(key->tun_id, vni);
-                if (key->tun_flags & TUNNEL_GENEVE_OPT)
+                if (info->options_len)
                         opts = ip_tunnel_info_opts(info);
 
                 if (key->tun_flags & TUNNEL_CSUM)
...
@@ -1756,17 +1756,15 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
                                       struct sk_buff *skb, int oif, u8 tos,
                                       __be32 daddr, __be32 *saddr,
                                       struct dst_cache *dst_cache,
-                                      struct ip_tunnel_info *info)
+                                      const struct ip_tunnel_info *info)
 {
+        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
         struct rtable *rt = NULL;
-        bool use_cache = false;
         struct flowi4 fl4;
 
-        /* when the ip_tunnel_info is availble, the tos used for lookup is
-         * packet independent, so we can use the cache
-         */
-        if (!skb->mark && (!tos || info)) {
-                use_cache = true;
+        if (tos && !info)
+                use_cache = false;
+        if (use_cache) {
                 rt = dst_cache_get_ip4(dst_cache, saddr);
                 if (rt)
                         return rt;
@@ -1791,16 +1789,20 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
 
 #if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
-                                          struct sk_buff *skb, int oif,
+                                          struct sk_buff *skb, int oif, u8 tos,
                                           const struct in6_addr *daddr,
                                           struct in6_addr *saddr,
-                                          struct dst_cache *dst_cache)
+                                          struct dst_cache *dst_cache,
+                                          const struct ip_tunnel_info *info)
 {
+        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
         struct dst_entry *ndst;
         struct flowi6 fl6;
         int err;
 
-        if (!skb->mark) {
+        if (tos && !info)
+                use_cache = false;
+        if (use_cache) {
                 ndst = dst_cache_get_ip6(dst_cache, saddr);
                 if (ndst)
                         return ndst;
@@ -1808,6 +1810,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 
         memset(&fl6, 0, sizeof(fl6));
         fl6.flowi6_oif = oif;
+        fl6.flowi6_tos = RT_TOS(tos);
         fl6.daddr = *daddr;
         fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
         fl6.flowi6_mark = skb->mark;
@@ -1820,7 +1823,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
                 return ERR_PTR(err);
 
         *saddr = fl6.saddr;
-        if (!skb->mark)
+        if (use_cache)
                 dst_cache_set_ip6(dst_cache, ndst, saddr);
         return ndst;
 }
@@ -2016,9 +2019,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                 sk = vxlan->vn6_sock->sock->sk;
 
                 ndst = vxlan6_get_route(vxlan, skb,
-                                        rdst ? rdst->remote_ifindex : 0,
+                                        rdst ? rdst->remote_ifindex : 0, tos,
                                         &dst->sin6.sin6_addr, &saddr,
-                                        dst_cache);
+                                        dst_cache, info);
                 if (IS_ERR(ndst)) {
                         netdev_dbg(dev, "no route to %pI6\n",
                                    &dst->sin6.sin6_addr);
@@ -2053,6 +2056,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                 if (!info)
                         udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
 
+                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                 ttl = ttl ? : ip6_dst_hoplimit(ndst);
                 skb_scrub_packet(skb, xnet);
                 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
@@ -2062,8 +2066,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                         return;
                 }
                 udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
-                                     &saddr, &dst->sin6.sin6_addr,
-                                     0, ttl, src_port, dst_port, !udp_sum);
+                                     &saddr, &dst->sin6.sin6_addr, tos, ttl,
+                                     src_port, dst_port, !udp_sum);
 #endif
         }
@@ -2385,9 +2389,9 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                 if (!vxlan->vn6_sock)
                         return -EINVAL;
-                ndst = vxlan6_get_route(vxlan, skb, 0,
+                ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
                                         &info->key.u.ipv6.dst,
-                                        &info->key.u.ipv6.src, NULL);
+                                        &info->key.u.ipv6.src, NULL, info);
                 if (IS_ERR(ndst))
                         return PTR_ERR(ndst);
 
                 dst_release(ndst);
...
@@ -120,6 +120,11 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
 
 #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
 
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+        *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+
 static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
 {
         __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
...
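
csum_replace_by_diff() exists so that a pure checksum delta, such as the return value of bpf_csum_diff(), can be folded into an existing L3 checksum; that is what the new case 0 of bpf_l3_csum_replace() further below does. A hedged tc-side sketch of the pairing (helper stubs and the fixed Ethernet+IPv4 offsets are assumptions for illustration):

#include <stdint.h>
#include <linux/bpf.h>

static int (*bpf_csum_diff)(const void *from, int from_size,
                            const void *to, int to_size, int seed) =
        (void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_store_bytes)(void *ctx, int off, const void *from,
                                  int len, int flags) =
        (void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to,
                                  int flags) =
        (void *) BPF_FUNC_l3_csum_replace;

#define ETH_HLEN    14
#define IP_CSUM_OFF (ETH_HLEN + 10)     /* offsetof(struct iphdr, check) */
#define IP_DST_OFF  (ETH_HLEN + 16)     /* offsetof(struct iphdr, daddr) */

static inline int rewrite_daddr(struct __sk_buff *skb, uint32_t old_ip,
                                uint32_t new_ip)
{
        /* Folded checksum delta between the old and new 4-byte field. */
        int csum = bpf_csum_diff(&old_ip, sizeof(old_ip),
                                 &new_ip, sizeof(new_ip), 0);

        bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
        /* Header-field size 0 with from == 0 selects the new diff-only
         * mode, which lands in csum_replace_by_diff() above.
         */
        return bpf_l3_csum_replace(skb, IP_CSUM_OFF, 0, csum, 0);
}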
@@ -140,6 +140,7 @@ struct ip_tunnel {
 #define TUNNEL_CRIT_OPT         __cpu_to_be16(0x0400)
 #define TUNNEL_GENEVE_OPT       __cpu_to_be16(0x0800)
 #define TUNNEL_VXLAN_OPT        __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE          __cpu_to_be16(0x2000)
 
 #define TUNNEL_OPTIONS_PRESENT  (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
@@ -206,6 +207,20 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
                0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
 }
 
+static inline bool
+ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
+                           const struct ip_tunnel_info *info)
+{
+        if (skb->mark)
+                return false;
+        if (!info)
+                return true;
+        if (info->key.tun_flags & TUNNEL_NOCACHE)
+                return false;
+
+        return true;
+}
+
 static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
                                                *tun_info)
 {
...
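
ip_tunnel_dst_cache_usable() centralizes the "may the cached dst serve this skb?" decision: the cache is skipped for marked skbs (the mark can steer policy routing) and whenever the BPF-set TUNNEL_NOCACHE flag indicates per-packet transmit metadata. Condensed, the calling pattern, which the gre_fb_xmit() hunk at the end of this diff instantiates, looks roughly like the sketch below (route_lookup() stands in for the driver's own resolver):

        struct rtable *rt = NULL;
        bool use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

        if (use_cache)
                rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
        if (!rt) {
                rt = route_lookup(skb, dev, &fl, key);  /* driver specific */
                if (!IS_ERR(rt) && use_cache)
                        dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
                                          fl.saddr);
        }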
@@ -298,6 +298,17 @@ enum bpf_func_id {
          * Return: csum result
          */
         BPF_FUNC_csum_diff,
+
+        /**
+         * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
+         * retrieve or populate tunnel options metadata
+         * @skb: pointer to skb
+         * @opt: pointer to raw tunnel option data
+         * @size: size of @opt
+         * Return: 0 on success for set, option size for get
+         */
+        BPF_FUNC_skb_get_tunnel_opt,
+        BPF_FUNC_skb_set_tunnel_opt,
         __BPF_FUNC_MAX_ID,
 };
@@ -305,6 +316,7 @@ enum bpf_func_id {
 
 /* BPF_FUNC_skb_store_bytes flags. */
 #define BPF_F_RECOMPUTE_CSUM            (1ULL << 0)
+#define BPF_F_INVALIDATE_HASH           (1ULL << 1)
 
 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
  * First 4 bits are for passing the header field size.
@@ -329,6 +341,7 @@ enum bpf_func_id {
 
 /* BPF_FUNC_skb_set_tunnel_key flags. */
 #define BPF_F_ZERO_CSUM_TX              (1ULL << 1)
+#define BPF_F_DONT_FRAGMENT             (1ULL << 2)
 
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
...
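
Of the two new uapi flags, BPF_F_INVALIDATE_HASH lets a program that rewrote flow-relevant bytes force the stack to recompute skb->hash (it is passed to bpf_skb_store_bytes(), optionally together with BPF_F_RECOMPUTE_CSUM), while BPF_F_DONT_FRAGMENT requests the DF bit on the outer header built from the tunnel key. A hedged sketch of the latter; the stub declaration, section name, and values are illustrative only:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_set_tunnel_key)(void *ctx, const void *key,
                                     int size, int flags) =
        (void *) BPF_FUNC_skb_set_tunnel_key;

__attribute__((section("set_tunnel_df"), used))
int set_tunnel_df(struct __sk_buff *skb)
{
        struct bpf_tunnel_key key = {};

        key.remote_ipv4 = 0x0a000001;   /* 10.0.0.1, illustrative */
        key.tunnel_id = 42;
        key.tunnel_ttl = 64;

        /* New flag: have the encap device set DF on the outer IPv4
         * header, e.g. for path MTU discovery across the underlay.
         */
        if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_ZERO_CSUM_TX |
                                   BPF_F_DONT_FRAGMENT) < 0)
                return TC_ACT_SHOT;

        return TC_ACT_OK;
}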
@@ -1353,7 +1353,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
         unsigned int len = (unsigned int) r4;
         void *ptr;
 
-        if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
+        if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
                 return -EINVAL;
 
         /* bpf verifier guarantees that:
@@ -1384,11 +1384,13 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 
         if (flags & BPF_F_RECOMPUTE_CSUM)
                 skb_postpush_rcsum(skb, ptr, len);
+        if (flags & BPF_F_INVALIDATE_HASH)
+                skb_clear_hash(skb);
 
         return 0;
 }
 
-const struct bpf_func_proto bpf_skb_store_bytes_proto = {
+static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
         .func           = bpf_skb_store_bytes,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1419,7 +1421,7 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
         return 0;
 }
 
-const struct bpf_func_proto bpf_skb_load_bytes_proto = {
+static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
         .func           = bpf_skb_load_bytes,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1447,6 +1449,12 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                 return -EFAULT;
 
         switch (flags & BPF_F_HDR_FIELD_MASK) {
+        case 0:
+                if (unlikely(from != 0))
+                        return -EINVAL;
+
+                csum_replace_by_diff(ptr, to);
+                break;
         case 2:
                 csum_replace2(ptr, from, to);
                 break;
@@ -1464,7 +1472,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
         return 0;
 }
 
-const struct bpf_func_proto bpf_l3_csum_replace_proto = {
+static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
         .func           = bpf_l3_csum_replace,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1523,7 +1531,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
         return 0;
 }
 
-const struct bpf_func_proto bpf_l4_csum_replace_proto = {
+static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
         .func           = bpf_l4_csum_replace,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1562,7 +1570,7 @@ static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
         return csum_partial(sp->diff, diff_size, seed);
 }
 
-const struct bpf_func_proto bpf_csum_diff_proto = {
+static const struct bpf_func_proto bpf_csum_diff_proto = {
         .func           = bpf_csum_diff,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1600,7 +1608,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
         return dev_queue_xmit(skb2);
 }
 
-const struct bpf_func_proto bpf_clone_redirect_proto = {
+static const struct bpf_func_proto bpf_clone_redirect_proto = {
         .func           = bpf_clone_redirect,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1652,7 +1660,7 @@ int skb_do_redirect(struct sk_buff *skb)
         return dev_queue_xmit(skb);
 }
 
-const struct bpf_func_proto bpf_redirect_proto = {
+static const struct bpf_func_proto bpf_redirect_proto = {
         .func           = bpf_redirect,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1791,7 +1799,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
         return 0;
 }
 
-const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
         .func           = bpf_skb_get_tunnel_key,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1801,6 +1809,32 @@ const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
         .arg4_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+{
+        struct sk_buff *skb = (struct sk_buff *) (long) r1;
+        u8 *to = (u8 *) (long) r2;
+        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+
+        if (unlikely(!info ||
+                     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)))
+                return -ENOENT;
+        if (unlikely(size < info->options_len))
+                return -ENOMEM;
+
+        ip_tunnel_info_opts_get(to, info);
+
+        return info->options_len;
+}
+
+static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
+        .func           = bpf_skb_get_tunnel_opt,
+        .gpl_only       = false,
+        .ret_type       = RET_INTEGER,
+        .arg1_type      = ARG_PTR_TO_CTX,
+        .arg2_type      = ARG_PTR_TO_STACK,
+        .arg3_type      = ARG_CONST_STACK_SIZE,
+};
+
 static struct metadata_dst __percpu *md_dst;
 
 static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
@@ -1811,7 +1845,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
         u8 compat[sizeof(struct bpf_tunnel_key)];
         struct ip_tunnel_info *info;
 
-        if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX)))
+        if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
+                               BPF_F_DONT_FRAGMENT)))
                 return -EINVAL;
         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                 switch (size) {
@@ -1835,7 +1870,10 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
         info = &md->u.tun_info;
         info->mode = IP_TUNNEL_INFO_TX;
 
-        info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM;
+        info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
+        if (flags & BPF_F_DONT_FRAGMENT)
+                info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+
         info->key.tun_id = cpu_to_be64(from->tunnel_id);
         info->key.tos = from->tunnel_tos;
         info->key.ttl = from->tunnel_ttl;
@@ -1853,7 +1891,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
         return 0;
 }
 
-const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
         .func           = bpf_skb_set_tunnel_key,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1863,17 +1901,58 @@ const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
         .arg4_type      = ARG_ANYTHING,
 };
 
-static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+#define BPF_TUNLEN_MAX  255
+
+static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+{
+        struct sk_buff *skb = (struct sk_buff *) (long) r1;
+        u8 *from = (u8 *) (long) r2;
+        struct ip_tunnel_info *info = skb_tunnel_info(skb);
+        const struct metadata_dst *md = this_cpu_ptr(md_dst);
+
+        if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
+                return -EINVAL;
+        if (unlikely(size > BPF_TUNLEN_MAX))
+                return -ENOMEM;
+
+        ip_tunnel_info_opts_set(info, from, size);
+
+        return 0;
+}
+
+static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
+        .func           = bpf_skb_set_tunnel_opt,
+        .gpl_only       = false,
+        .ret_type       = RET_INTEGER,
+        .arg1_type      = ARG_PTR_TO_CTX,
+        .arg2_type      = ARG_PTR_TO_STACK,
+        .arg3_type      = ARG_CONST_STACK_SIZE,
+};
+
+static const struct bpf_func_proto *
+bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 {
         if (!md_dst) {
-                /* race is not possible, since it's called from
-                 * verifier that is holding verifier mutex
+                BUILD_BUG_ON(FIELD_SIZEOF(struct ip_tunnel_info,
+                                          options_len) != 1);
+
+                /* Race is not possible, since it's called from verifier
+                 * that is holding verifier mutex.
                  */
-                md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+                md_dst = metadata_dst_alloc_percpu(BPF_TUNLEN_MAX,
+                                                   GFP_KERNEL);
                 if (!md_dst)
                         return NULL;
         }
-        return &bpf_skb_set_tunnel_key_proto;
+
+        switch (which) {
+        case BPF_FUNC_skb_set_tunnel_key:
+                return &bpf_skb_set_tunnel_key_proto;
+        case BPF_FUNC_skb_set_tunnel_opt:
+                return &bpf_skb_set_tunnel_opt_proto;
+        default:
+                return NULL;
+        }
 }
 
 static const struct bpf_func_proto *
@@ -1927,7 +2006,11 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
         case BPF_FUNC_skb_get_tunnel_key:
                 return &bpf_skb_get_tunnel_key_proto;
         case BPF_FUNC_skb_set_tunnel_key:
-                return bpf_get_skb_set_tunnel_key_proto();
+                return bpf_get_skb_set_tunnel_proto(func_id);
+        case BPF_FUNC_skb_get_tunnel_opt:
+                return &bpf_skb_get_tunnel_opt_proto;
+        case BPF_FUNC_skb_set_tunnel_opt:
+                return bpf_get_skb_set_tunnel_proto(func_id);
         case BPF_FUNC_redirect:
                 return &bpf_redirect_proto;
         case BPF_FUNC_get_route_realm:
...
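
On the read side, bpf_skb_get_tunnel_opt() mirrors the setter: it returns -ENOENT when the skb carries no options, -ENOMEM when the destination buffer is smaller than options_len, and the option length on success. A hedged ingress sketch (stub declaration, buffer size, and section name are arbitrary choices for illustration):

#include <stdint.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *opt, int size) =
        (void *) BPF_FUNC_skb_get_tunnel_opt;

__attribute__((section("get_tunnel_opt"), used))
int read_tunnel_opt(struct __sk_buff *skb)
{
        uint8_t opts[64] = {};  /* any constant size up to BPF_TUNLEN_MAX */
        int len = bpf_skb_get_tunnel_opt(skb, opts, sizeof(opts));

        if (len < 0)            /* -ENOENT: no options on this skb */
                return TC_ACT_OK;

        /* len bytes of raw option data (e.g. a Geneve TLV or the
         * VXLAN-GBP word) are now available in opts[].
         */
        return TC_ACT_OK;
}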
@@ -527,11 +527,12 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct ip_tunnel_info *tun_info;
         const struct ip_tunnel_key *key;
+        struct rtable *rt = NULL;
         struct flowi4 fl;
-        struct rtable *rt;
         int min_headroom;
         int tunnel_hlen;
         __be16 df, flags;
+        bool use_cache;
         int err;
 
         tun_info = skb_tunnel_info(skb);
@@ -540,13 +541,14 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                 goto err_free_skb;
 
         key = &tun_info->key;
-        rt = !skb->mark ? dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr) :
-                          NULL;
+        use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
+        if (use_cache)
+                rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
         if (!rt) {
                 rt = gre_get_rt(skb, dev, &fl, key);
                 if (IS_ERR(rt))
                         goto err_free_skb;
-                if (!skb->mark)
+                if (use_cache)
                         dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
                                           fl.saddr);
         }
...