Commit 53c4ce02 authored by David S. Miller

Merge branch 'bpf-misc-next'

Daniel Borkmann says:

====================
BPF cleanups and misc updates

This patch set adds a couple of cleanups in the first few patches,
exposes owner_prog_type for prog array maps as well as mlocked
memory for maps in fdinfo, allows for mount permissions in bpf fs,
and fixes various outstanding issues in selftests and samples.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e3739099 e00c7b21
......@@ -1518,7 +1518,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
xdp.data = data;
xdp.data_end = data + len;
return BPF_PROG_RUN(prog, (void *)&xdp);
return BPF_PROG_RUN(prog, &xdp);
}
/**
......
......@@ -408,8 +408,8 @@ struct bpf_prog {
enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
/* Instructions for interpreter */
union {
struct sock_filter insns[0];
......@@ -504,7 +504,7 @@ static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
u32 ret;
rcu_read_lock();
ret = BPF_PROG_RUN(prog, (void *)xdp);
ret = BPF_PROG_RUN(prog, xdp);
rcu_read_unlock();
return ret;
......
......@@ -44,4 +44,20 @@ static inline int arp_hdr_len(struct net_device *dev)
return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
}
}
static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
{
switch (dev->type) {
case ARPHRD_TUNNEL:
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
return false;
default:
return true;
}
}
#endif /* _LINUX_IF_ARP_H */
......@@ -18,6 +18,7 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
......@@ -364,15 +365,66 @@ static void bpf_evict_inode(struct inode *inode)
static const struct super_operations bpf_super_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = generic_show_options,
.evict_inode = bpf_evict_inode,
};
enum {
OPT_MODE,
OPT_ERR,
};
static const match_table_t bpf_mount_tokens = {
{ OPT_MODE, "mode=%o" },
{ OPT_ERR, NULL },
};
struct bpf_mount_opts {
umode_t mode;
};
static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
int option, token;
char *ptr;
opts->mode = S_IRWXUGO;
while ((ptr = strsep(&data, ",")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, bpf_mount_tokens, args);
switch (token) {
case OPT_MODE:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->mode = option & S_IALLUGO;
break;
/* We might like to report bad mount options here, but
* traditionally we've ignored all mount options, so we'd
* better continue to ignore non-existing options for bpf.
*/
}
}
return 0;
}
static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr bpf_rfiles[] = { { "" } };
struct bpf_mount_opts opts;
struct inode *inode;
int ret;
save_mount_options(sb, data);
ret = bpf_parse_options(data, &opts);
if (ret)
return ret;
ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
if (ret)
return ret;
......@@ -382,7 +434,7 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
inode = sb->s_root->d_inode;
inode->i_op = &bpf_dir_iops;
inode->i_mode &= ~S_IALLUGO;
inode->i_mode |= S_ISVTX | S_IRWXUGO;
inode->i_mode |= S_ISVTX | opts.mode;
return 0;
}
......
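A minimal user-space sketch (not part of the patch) of how the new mount option above can be exercised: mount bpffs with a restricted root mode and read the result back. The mount point /sys/fs/bpf and the "bpf" filesystem type name are assumptions for illustration; the option string is what bpf_parse_options() parses via match_octal().

#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	/* "mode=0700" is parsed by bpf_parse_options() and masked with
	 * S_IALLUGO before being applied to the bpffs root inode.
	 */
	if (mount("bpf", "/sys/fs/bpf", "bpf", 0, "mode=0700")) {
		perror("mount");
		return 1;
	}
	if (stat("/sys/fs/bpf", &st)) {
		perror("stat");
		return 1;
	}
	/* bpf_fill_super() still ORs in the sticky bit on top of opts.mode. */
	printf("bpffs root mode: %04o\n", (unsigned int)(st.st_mode & 07777));
	return 0;
}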
......@@ -138,18 +138,31 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_map *map = filp->private_data;
const struct bpf_array *array;
u32 owner_prog_type = 0;
if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
array = container_of(map, struct bpf_array, map);
owner_prog_type = array->owner_prog_type;
}
seq_printf(m,
"map_type:\t%u\n"
"key_size:\t%u\n"
"value_size:\t%u\n"
"max_entries:\t%u\n"
"map_flags:\t%#x\n",
"map_flags:\t%#x\n"
"memlock:\t%llu\n",
map->map_type,
map->key_size,
map->value_size,
map->max_entries,
map->map_flags);
map->map_flags,
map->pages * 1ULL << PAGE_SHIFT);
if (owner_prog_type)
seq_printf(m, "owner_prog_type:\t%u\n",
owner_prog_type);
}
#endif
......
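The fdinfo change above can be observed from user space by reading /proc/self/fdinfo/<fd> for a BPF map file descriptor. A minimal sketch, assuming __NR_bpf is exposed by the system headers and using a raw bpf(2) call rather than any particular library wrapper; expect a memlock: line for every map, and owner_prog_type: once a prog array has an owner.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bpf_create_map(enum bpf_map_type type, unsigned int key_size,
			  unsigned int value_size, unsigned int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int main(void)
{
	char path[64], line[256];
	FILE *fp;
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(long), 64);
	if (fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	fp = fopen(path, "r");
	if (!fp) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);	/* includes the new memlock: field */
	fclose(fp);
	return 0;
}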
......@@ -7726,7 +7726,7 @@ static void bpf_overflow_handler(struct perf_event *event,
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
goto out;
rcu_read_lock();
ret = BPF_PROG_RUN(event->prog, (void *)&ctx);
ret = BPF_PROG_RUN(event->prog, &ctx);
rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
......
......@@ -195,7 +195,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd)
* value always takes priority (ignoring the DATA).
*/
for (; f; f = f->prev) {
u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);
u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret;
......
......@@ -30,6 +30,7 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
......@@ -1696,17 +1697,10 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
switch (dev->type) {
case ARPHRD_TUNNEL:
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
return __bpf_redirect_no_mac(skb, dev, flags);
default:
if (dev_is_mac_header_xmit(dev))
return __bpf_redirect_common(skb, dev, flags);
}
else
return __bpf_redirect_no_mac(skb, dev, flags);
}
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
......
......@@ -28,7 +28,6 @@ struct tcf_bpf_cfg {
struct bpf_prog *filter;
struct sock_filter *bpf_ops;
const char *bpf_name;
u32 bpf_fd;
u16 bpf_num_ops;
bool is_ebpf;
};
......@@ -118,9 +117,6 @@ static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
struct sk_buff *skb)
{
if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
return -EMSGSIZE;
if (prog->bpf_name &&
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
......@@ -233,7 +229,6 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
}
}
cfg->bpf_fd = bpf_fd;
cfg->bpf_name = name;
cfg->filter = fp;
cfg->is_ebpf = true;
......@@ -332,8 +327,6 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
if (cfg.bpf_num_ops)
prog->bpf_num_ops = cfg.bpf_num_ops;
if (cfg.bpf_fd)
prog->bpf_fd = cfg.bpf_fd;
prog->tcf_action = parm->action;
rcu_assign_pointer(prog->filter, cfg.filter);
......
......@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
......@@ -73,20 +74,6 @@ static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;
static bool dev_is_mac_header_xmit(const struct net_device *dev)
{
switch (dev->type) {
case ARPHRD_TUNNEL:
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
return false;
}
return true;
}
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind)
......
......@@ -45,10 +45,7 @@ struct cls_bpf_prog {
u32 gen_flags;
struct tcf_exts exts;
u32 handle;
union {
u32 bpf_fd;
u16 bpf_num_ops;
};
struct sock_filter *bpf_ops;
const char *bpf_name;
struct tcf_proto *tp;
......@@ -377,7 +374,6 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
}
prog->bpf_ops = NULL;
prog->bpf_fd = bpf_fd;
prog->bpf_name = name;
prog->filter = fp;
......@@ -561,9 +557,6 @@ static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
struct sk_buff *skb)
{
if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
return -EMSGSIZE;
if (prog->bpf_name &&
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
......
......@@ -91,6 +91,7 @@ always += trace_event_kern.o
always += sampleip_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
HOSTCFLAGS += -I$(objtree)/tools/testing/selftests/bpf/
HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
HOSTLOADLIBES_fds_example += -lelf
......
......@@ -16,10 +16,13 @@
#include <sched.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <stdlib.h>
#include <time.h>
#include "libbpf.h"
#include "bpf_util.h"
#define min(a, b) ((a) < (b) ? (a) : (b))
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
......@@ -510,7 +513,7 @@ int main(int argc, char **argv)
srand(time(NULL));
nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
nr_cpus = bpf_num_possible_cpus();
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
......
......@@ -4,8 +4,10 @@
#include <signal.h>
#include <linux/bpf.h>
#include <string.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "bpf_util.h"
#define MAX_INDEX 64
#define MAX_STARS 38
......@@ -36,8 +38,8 @@ struct hist_key {
static void print_hist_for_pid(int fd, void *task)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct hist_key key = {}, next_key;
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
long values[nr_cpus];
char starstr[MAX_STARS];
long value;
......
......@@ -11,8 +11,10 @@
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "bpf_util.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
......@@ -20,7 +22,7 @@
static void clear_stats(int fd)
{
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
unsigned int nr_cpus = bpf_num_possible_cpus();
__u64 values[nr_cpus];
__u32 key;
......@@ -77,7 +79,7 @@ static void print_banner(void)
static void print_hist(int fd)
{
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
unsigned int nr_cpus = bpf_num_possible_cpus();
__u64 total_events = 0;
long values[nr_cpus];
__u64 max_cnt = 0;
......
......@@ -15,7 +15,9 @@
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include "bpf_load.h"
#include "bpf_util.h"
#include "libbpf.h"
static int set_link_xdp_fd(int ifindex, int fd)
......@@ -120,7 +122,7 @@ static void int_exit(int sig)
*/
static void poll_stats(int interval)
{
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
unsigned int nr_cpus = bpf_num_possible_cpus();
const unsigned int nr_keys = 256;
__u64 values[nr_cpus], prev[nr_keys][nr_cpus];
__u32 key;
......
#ifndef __BPF_UTIL__
#define __BPF_UTIL__
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
static inline unsigned int bpf_num_possible_cpus(void)
{
static const char *fcpu = "/sys/devices/system/cpu/possible";
unsigned int start, end, possible_cpus = 0;
char buff[128];
FILE *fp;
fp = fopen(fcpu, "r");
if (!fp) {
printf("Failed to open %s: '%s'!\n", fcpu, strerror(errno));
exit(1);
}
while (fgets(buff, sizeof(buff), fp)) {
if (sscanf(buff, "%u-%u", &start, &end) == 2) {
possible_cpus = start == 0 ? end + 1 : 0;
break;
}
}
fclose(fp);
if (!possible_cpus) {
printf("Failed to retrieve # possible CPUs!\n");
exit(1);
}
return possible_cpus;
}
#endif /* __BPF_UTIL__ */
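The samples and selftests below switch from sysconf(_SC_NPROCESSORS_CONF) to this helper because per-CPU map lookups hand back one value slot per possible CPU, and the sysconf value can disagree with the kernel's possible-CPU count. A small usage sketch of the sizing pattern (the actual map lookup is elided):

#include <stdio.h>
#include <string.h>
#include "bpf_util.h"

int main(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	long values[nr_cpus];	/* one slot per possible CPU */

	/* A lookup on a per-CPU map would fill values[0..nr_cpus-1];
	 * it is elided here to keep the sketch dependency-free.
	 */
	memset(values, 0, sizeof(values));
	printf("per-CPU value buffer: %u slots, %zu bytes\n",
	       nr_cpus, sizeof(values));
	return 0;
}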
......@@ -12,10 +12,14 @@
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <time.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include "bpf_sys.h"
#include "bpf_util.h"
#define LOCAL_FREE_TARGET (128)
#define PERCPU_FREE_TARGET (16)
......@@ -559,7 +563,7 @@ int main(int argc, char **argv)
assert(!setrlimit(RLIMIT_MEMLOCK, &r));
nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
nr_cpus = bpf_num_possible_cpus();
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
......
......@@ -22,6 +22,7 @@
#include <linux/bpf.h>
#include "bpf_sys.h"
#include "bpf_util.h"
static int map_flags;
......@@ -110,7 +111,7 @@ static void test_hashmap(int task, void *data)
static void test_hashmap_percpu(int task, void *data)
{
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
unsigned int nr_cpus = bpf_num_possible_cpus();
long long value[nr_cpus];
long long key, next_key;
int expected_key_mask = 0;
......@@ -258,7 +259,7 @@ static void test_arraymap(int task, void *data)
static void test_arraymap_percpu(int task, void *data)
{
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
unsigned int nr_cpus = bpf_num_possible_cpus();
int key, next_key, fd, i;
long values[nr_cpus];
......@@ -313,7 +314,7 @@ static void test_arraymap_percpu(int task, void *data)
static void test_arraymap_percpu_many_keys(void)
{
unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
unsigned int nr_cpus = bpf_num_possible_cpus();
unsigned int nr_keys = 20000;
long values[nr_cpus];
int key, fd, i;
......
......@@ -285,7 +285,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
BPF_EXIT_INSN(),
},
.errstr = "invalid func 1234567",
.errstr = "invalid func unknown#1234567",
.result = REJECT,
},
{
......