Commit 3004932c authored by David S. Miller's avatar David S. Miller

Merge branch 'bpf-misc'

Daniel Borkmann says:

====================
Minor BPF follow-ups

Some minor last follow-ups I still had in my queue. The first one adds
readability support for __sk_buff's tc_classid member, the remaining
two are some minor cleanups. For details please see individual patches.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 019ded3a fca5fdf6
...@@ -398,6 +398,18 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, ...@@ -398,6 +398,18 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
__skb_tunnel_rx(skb, dev, net); __skb_tunnel_rx(skb, dev, net);
} }
/* Return the routing-realm classid attached to skb's dst entry, or 0
 * when the skb has no dst or the kernel was built without
 * CONFIG_IP_ROUTE_CLASSID (in which case dst_entry has no tclassid
 * field at all).
 */
static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst = skb_dst(skb);

	return dst ? dst->tclassid : 0;
#else
	return 0;
#endif
}
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb) static inline int dst_discard(struct sk_buff *skb)
{ {
......
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
#include <linux/socket.h> #include <linux/socket.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/u64_stats_sync.h> #include <linux/u64_stats_sync.h>
#include <linux/bitops.h>
#include <net/dsfield.h> #include <net/dsfield.h>
#include <net/gro_cells.h> #include <net/gro_cells.h>
#include <net/inet_ecn.h> #include <net/inet_ecn.h>
...@@ -57,6 +59,11 @@ struct ip_tunnel_key { ...@@ -57,6 +59,11 @@ struct ip_tunnel_key {
#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */ #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */ #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
/* Maximum tunnel options length.
 *
 * Derived from the width of ip_tunnel_info's options_len member: a
 * GENMASK spanning all of its bits yields the largest value that field
 * can hold (255 for the current one-byte field), so this constant can
 * never silently exceed what options_len is able to represent.
 */
#define IP_TUNNEL_OPTS_MAX \
GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \
options_len) * BITS_PER_BYTE) - 1, 0)
struct ip_tunnel_info { struct ip_tunnel_info {
struct ip_tunnel_key key; struct ip_tunnel_key key;
#ifdef CONFIG_DST_CACHE #ifdef CONFIG_DST_CACHE
......
...@@ -1682,14 +1682,7 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { ...@@ -1682,14 +1682,7 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{ {
#ifdef CONFIG_IP_ROUTE_CLASSID return dst_tclassid((struct sk_buff *) (unsigned long) r1);
const struct dst_entry *dst;
dst = skb_dst((struct sk_buff *) (unsigned long) r1);
if (dst)
return dst->tclassid;
#endif
return 0;
} }
static const struct bpf_func_proto bpf_get_route_realm_proto = { static const struct bpf_func_proto bpf_get_route_realm_proto = {
...@@ -1911,8 +1904,6 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { ...@@ -1911,8 +1904,6 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING,
}; };
#define BPF_TUNLEN_MAX 255
static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5) static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
{ {
struct sk_buff *skb = (struct sk_buff *) (long) r1; struct sk_buff *skb = (struct sk_buff *) (long) r1;
...@@ -1922,7 +1913,7 @@ static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5) ...@@ -1922,7 +1913,7 @@ static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
return -EINVAL; return -EINVAL;
if (unlikely(size > BPF_TUNLEN_MAX)) if (unlikely(size > IP_TUNNEL_OPTS_MAX))
return -ENOMEM; return -ENOMEM;
ip_tunnel_info_opts_set(info, from, size); ip_tunnel_info_opts_set(info, from, size);
...@@ -1943,13 +1934,10 @@ static const struct bpf_func_proto * ...@@ -1943,13 +1934,10 @@ static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{ {
if (!md_dst) { if (!md_dst) {
BUILD_BUG_ON(FIELD_SIZEOF(struct ip_tunnel_info,
options_len) != 1);
/* Race is not possible, since it's called from verifier /* Race is not possible, since it's called from verifier
* that is holding verifier mutex. * that is holding verifier mutex.
*/ */
md_dst = metadata_dst_alloc_percpu(BPF_TUNLEN_MAX, md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
GFP_KERNEL); GFP_KERNEL);
if (!md_dst) if (!md_dst)
return NULL; return NULL;
...@@ -2069,16 +2057,14 @@ static bool sk_filter_is_valid_access(int off, int size, ...@@ -2069,16 +2057,14 @@ static bool sk_filter_is_valid_access(int off, int size,
static bool tc_cls_act_is_valid_access(int off, int size, static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type) enum bpf_access_type type)
{ {
if (off == offsetof(struct __sk_buff, tc_classid))
return type == BPF_WRITE ? true : false;
if (type == BPF_WRITE) { if (type == BPF_WRITE) {
switch (off) { switch (off) {
case offsetof(struct __sk_buff, mark): case offsetof(struct __sk_buff, mark):
case offsetof(struct __sk_buff, tc_index): case offsetof(struct __sk_buff, tc_index):
case offsetof(struct __sk_buff, priority): case offsetof(struct __sk_buff, priority):
case offsetof(struct __sk_buff, cb[0]) ... case offsetof(struct __sk_buff, cb[0]) ...
offsetof(struct __sk_buff, cb[4]): offsetof(struct __sk_buff, cb[4]):
case offsetof(struct __sk_buff, tc_classid):
break; break;
default: default:
return false; return false;
...@@ -2195,8 +2181,10 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, ...@@ -2195,8 +2181,10 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
ctx_off -= offsetof(struct __sk_buff, tc_classid); ctx_off -= offsetof(struct __sk_buff, tc_classid);
ctx_off += offsetof(struct sk_buff, cb); ctx_off += offsetof(struct sk_buff, cb);
ctx_off += offsetof(struct qdisc_skb_cb, tc_classid); ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
WARN_ON(type != BPF_WRITE); if (type == BPF_WRITE)
*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off); *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
else
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
break; break;
case offsetof(struct __sk_buff, tc_index): case offsetof(struct __sk_buff, tc_index):
......
...@@ -398,6 +398,12 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = { ...@@ -398,6 +398,12 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
void __init ip_tunnel_core_init(void) void __init ip_tunnel_core_init(void)
{ {
/* If you land here, make sure whether increasing ip_tunnel_info's
* options_len is a reasonable choice with its usage in front ends
* (f.e., it's part of flow keys, etc).
*/
BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);
lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP); lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6); lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
} }
......
...@@ -55,7 +55,7 @@ struct ovs_tunnel_info { ...@@ -55,7 +55,7 @@ struct ovs_tunnel_info {
FIELD_SIZEOF(struct sw_flow_key, recirc_id)) FIELD_SIZEOF(struct sw_flow_key, recirc_id))
struct sw_flow_key { struct sw_flow_key {
u8 tun_opts[255]; u8 tun_opts[IP_TUNNEL_OPTS_MAX];
u8 tun_opts_len; u8 tun_opts_len;
struct ip_tunnel_key tun_key; /* Encapsulating tunnel key. */ struct ip_tunnel_key tun_key; /* Encapsulating tunnel key. */
struct { struct {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment