Commit 3c30819d authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-09-27

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix libbpf's BTF dumper to not skip anonymous enum definitions, from Andrii.

2) Fix BTF verifier issues when handling the BTF of vmlinux, from Alexei.

3) Fix nested calls into bpf_event_output() from TCP sockops BPF
   programs, from Allan.

4) Fix NULL pointer dereference in AF_XDP's xsk map creation when
   allocation fails, from Jonathan.

5) Remove unneeded 64 byte alignment requirement of the AF_XDP UMEM
   headroom, from Bjorn.

6) Remove unused XDP_OPTIONS getsockopt() call which results in an error
   on older kernels, from Toke.

7) Fix a client/server race in tcp_rtt BPF kselftest case, from Stanislav.

8) Fix indentation issue in BTF's btf_enum_check_kflag_member(), from Colin.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5c7ff181 768fb61f
@@ -22,9 +22,9 @@ struct btf_header {
 };
 
 /* Max # of type identifier */
-#define BTF_MAX_TYPE	0x0000ffff
+#define BTF_MAX_TYPE	0x000fffff
 /* Max offset into the string section */
-#define BTF_MAX_NAME_OFFSET	0x0000ffff
+#define BTF_MAX_NAME_OFFSET	0x00ffffff
 /* Max # of struct/union/enum members or func args */
 #define BTF_MAX_VLEN	0xffff
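These constants bound what the in-kernel BTF verifier accepts: the raised limits allow type IDs up to 0xfffff (roughly one million types) and string offsets up to 0xffffff (just under 16 MiB), which vmlinux-scale BTF needs. A minimal sketch of the kind of range check they back (illustrative helper, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    #define BTF_MAX_TYPE        0x000fffff  /* max type identifier */
    #define BTF_MAX_NAME_OFFSET 0x00ffffff  /* max string section offset */

    /* Hypothetical helper mirroring the kind of bounds check the BTF
     * verifier performs against these limits. */
    static bool btf_id_and_name_in_range(uint32_t type_id, uint32_t name_off)
    {
        return type_id <= BTF_MAX_TYPE && name_off <= BTF_MAX_NAME_OFFSET;
    }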
@@ -2332,7 +2332,7 @@ static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
     if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
         btf_verifier_log_member(env, struct_type, member,
                                 "Member is not byte aligned");
-            return -EINVAL;
+        return -EINVAL;
     }
 
     nr_bits = int_bitsize;
@@ -2377,9 +2377,8 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
         return -EINVAL;
     }
 
-    if (t->size != sizeof(int)) {
-        btf_verifier_log_type(env, t, "Expected size:%zu",
-                              sizeof(int));
+    if (t->size > 8 || !is_power_of_2(t->size)) {
+        btf_verifier_log_type(env, t, "Unexpected size");
         return -EINVAL;
     }
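The old check rejected any enum whose BTF size was not exactly sizeof(int); the relaxed check, part of the vmlinux BTF fixes, accepts any power-of-two size up to 8 bytes. A small illustration of an enum whose storage size is not 4 (a sketch, not from the patch):

    /* With GCC/Clang, a packed enum occupies the smallest integer type
     * that fits its values, so its BTF size is 1 rather than 4. */
    enum small_mode {
        MODE_A,
        MODE_B,
    } __attribute__((packed));

    _Static_assert(sizeof(enum small_mode) == 1, "packed enum is 1 byte");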
@@ -37,7 +37,7 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
     node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
     if (!node)
-        return NULL;
+        return ERR_PTR(-ENOMEM);
 
     err = xsk_map_inc(map);
     if (err) {
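Returning ERR_PTR(-ENOMEM) instead of NULL matters because the caller decodes the result with IS_ERR()/PTR_ERR(); a bare NULL would pass that check, be treated as a valid node and then dereferenced. A minimal sketch of the expected caller-side pattern (hypothetical caller, not the exact xskmap code):

    struct xsk_map_node *node;

    node = xsk_map_node_alloc(map, map_entry);
    if (IS_ERR(node))
        return PTR_ERR(node);  /* propagate -ENOMEM instead of crashing on NULL */

    /* ... link and use node ... */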
@@ -500,14 +500,17 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
     .arg5_type = ARG_CONST_SIZE_OR_ZERO,
 };
 
-static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
+static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
+struct bpf_nested_pt_regs {
+    struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                      void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
-    struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
-    struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
+    int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
     struct perf_raw_frag frag = {
         .copy = ctx_copy,
         .size = ctx_size,
@@ -522,12 +525,25 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
             .data = meta,
         },
     };
+    struct perf_sample_data *sd;
+    struct pt_regs *regs;
+    u64 ret;
+
+    if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
+        ret = -EBUSY;
+        goto out;
+    }
+    sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
+    regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 
     perf_fetch_caller_regs(regs);
     perf_sample_data_init(sd, 0, 0);
     sd->raw = &raw;
 
-    return __bpf_perf_event_output(regs, map, flags, sd);
+    ret = __bpf_perf_event_output(regs, map, flags, sd);
+out:
+    this_cpu_dec(bpf_event_output_nest_level);
+    return ret;
 }
 
 BPF_CALL_0(bpf_get_current_task)
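The fix replaces the single per-CPU scratch pt_regs/sample buffers with small per-CPU arrays indexed by a nesting counter, so a call that re-enters bpf_event_output() on the same CPU (for example from a nested tracing context) no longer clobbers the outer call's state. A condensed sketch of the pattern, using illustrative names rather than the exact kernel symbols:

    #define MAX_NEST 3

    struct nested_regs {
        struct pt_regs regs[MAX_NEST];
    };

    static DEFINE_PER_CPU(int, out_nest_level);
    static DEFINE_PER_CPU(struct nested_regs, out_regs);

    /* Runs in tracing context with preemption disabled. */
    static int nested_output(void)
    {
        int nest_level = this_cpu_inc_return(out_nest_level); /* 1-based depth */
        struct pt_regs *regs;
        int ret;

        if (WARN_ON_ONCE(nest_level > MAX_NEST)) {  /* nested too deep: bail out */
            ret = -EBUSY;
            goto out;
        }
        regs = this_cpu_ptr(&out_regs.regs[nest_level - 1]); /* slot private to this depth */

        /* ... fill regs and emit the event for this nesting level ... */
        ret = 0;
    out:
        this_cpu_dec(out_nest_level);               /* always unwind the counter */
        return ret;
    }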
@@ -383,8 +383,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
         return -EINVAL;
     }
 
-    headroom = ALIGN(headroom, 64);
-
     size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
     if (size_chk < 0)
         return -EINVAL;
@@ -48,6 +48,8 @@ struct btf_dump_type_aux_state {
     __u8 fwd_emitted: 1;
     /* whether unique non-duplicate name was already assigned */
     __u8 name_resolved: 1;
+    /* whether type is referenced from any other type */
+    __u8 referenced: 1;
 };
 
 struct btf_dump {
@@ -173,6 +175,7 @@ void btf_dump__free(struct btf_dump *d)
     free(d);
 }
 
+static int btf_dump_mark_referenced(struct btf_dump *d);
 static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
 static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
@@ -213,6 +216,11 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
         /* VOID is special */
         d->type_states[0].order_state = ORDERED;
         d->type_states[0].emit_state = EMITTED;
+
+        /* eagerly determine referenced types for anon enums */
+        err = btf_dump_mark_referenced(d);
+        if (err)
+            return err;
     }
 
     d->emit_queue_cnt = 0;
@@ -226,6 +234,79 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
     return 0;
 }
 
+/*
+ * Mark all types that are referenced from any other type. This is used to
+ * determine top-level anonymous enums that need to be emitted as independent
+ * type declarations.
+ * Anonymous enums come in two flavors: either embedded in a struct's field
+ * definition, in which case they have to be declared inline as part of the
+ * field type declaration; or as a top-level anonymous enum, typically used
+ * for declaring global constants. It's impossible to distinguish between the
+ * two without knowing whether a given enum type was referenced from another
+ * type: a top-level anonymous enum won't be referenced by anything, while an
+ * embedded one will.
+ */
+static int btf_dump_mark_referenced(struct btf_dump *d)
+{
+    int i, j, n = btf__get_nr_types(d->btf);
+    const struct btf_type *t;
+    __u16 vlen;
+
+    for (i = 1; i <= n; i++) {
+        t = btf__type_by_id(d->btf, i);
+        vlen = btf_vlen(t);
+
+        switch (btf_kind(t)) {
+        case BTF_KIND_INT:
+        case BTF_KIND_ENUM:
+        case BTF_KIND_FWD:
+            break;
+
+        case BTF_KIND_VOLATILE:
+        case BTF_KIND_CONST:
+        case BTF_KIND_RESTRICT:
+        case BTF_KIND_PTR:
+        case BTF_KIND_TYPEDEF:
+        case BTF_KIND_FUNC:
+        case BTF_KIND_VAR:
+            d->type_states[t->type].referenced = 1;
+            break;
+
+        case BTF_KIND_ARRAY: {
+            const struct btf_array *a = btf_array(t);
+
+            d->type_states[a->index_type].referenced = 1;
+            d->type_states[a->type].referenced = 1;
+            break;
+        }
+        case BTF_KIND_STRUCT:
+        case BTF_KIND_UNION: {
+            const struct btf_member *m = btf_members(t);
+
+            for (j = 0; j < vlen; j++, m++)
+                d->type_states[m->type].referenced = 1;
+            break;
+        }
+        case BTF_KIND_FUNC_PROTO: {
+            const struct btf_param *p = btf_params(t);
+
+            for (j = 0; j < vlen; j++, p++)
+                d->type_states[p->type].referenced = 1;
+            break;
+        }
+        case BTF_KIND_DATASEC: {
+            const struct btf_var_secinfo *v = btf_var_secinfos(t);
+
+            for (j = 0; j < vlen; j++, v++)
+                d->type_states[v->type].referenced = 1;
+            break;
+        }
+        default:
+            return -EINVAL;
+        }
+    }
+    return 0;
+}
+
 static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
 {
     __u32 *new_queue;
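To make the comment above concrete, these are the two shapes of anonymous enum the dumper now tells apart (a small illustration, not from the patch):

    /* Top-level anonymous enum: no other type references its type ID; it
     * only exists to declare constants, so it must be emitted on its own. */
    enum {
        GLOBAL_CONST_A = 1,
        GLOBAL_CONST_B = 2,
    };

    /* Embedded anonymous enum: referenced by the struct member's type, so
     * it is printed inline as part of the field declaration. */
    struct config {
        enum { MODE_X, MODE_Y } mode;
    };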
@@ -395,7 +476,12 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
     }
     case BTF_KIND_ENUM:
     case BTF_KIND_FWD:
-        if (t->name_off != 0) {
+        /*
+         * non-anonymous or non-referenced enums are top-level
+         * declarations and should be emitted. Same logic can be
+         * applied to FWDs, it won't hurt anyways.
+         */
+        if (t->name_off != 0 || !tstate->referenced) {
             err = btf_dump_add_emit_queue_id(d, id);
             if (err)
                 return err;
@@ -536,11 +622,6 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
     t = btf__type_by_id(d->btf, id);
     kind = btf_kind(t);
 
-    if (top_level_def && t->name_off == 0) {
-        pr_warning("unexpected nameless definition, id:[%u]\n", id);
-        return;
-    }
-
     if (tstate->emit_state == EMITTING) {
         if (tstate->fwd_emitted)
             return;
@@ -1167,6 +1248,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
             return;
         }
 
+        next_id = decls->ids[decls->cnt - 1];
         next_t = btf__type_by_id(d->btf, next_id);
         multidim = btf_is_array(next_t);
         /* we need space if we have named non-pointer */
@@ -65,7 +65,6 @@ struct xsk_socket {
     int xsks_map_fd;
     __u32 queue_id;
     char ifname[IFNAMSIZ];
-    bool zc;
 };
 
 struct xsk_nl_info {
@@ -491,7 +490,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
     void *rx_map = NULL, *tx_map = NULL;
     struct sockaddr_xdp sxdp = {};
     struct xdp_mmap_offsets off;
-    struct xdp_options opts;
     struct xsk_socket *xsk;
     socklen_t optlen;
     int err;
@@ -611,15 +609,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
     xsk->prog_fd = -1;
 
-    optlen = sizeof(opts);
-    err = getsockopt(xsk->fd, SOL_XDP, XDP_OPTIONS, &opts, &optlen);
-    if (err) {
-        err = -errno;
-        goto out_mmap_tx;
-    }
-
-    xsk->zc = opts.flags & XDP_OPTIONS_ZEROCOPY;
-
     if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
         err = xsk_setup_xdp_prog(xsk);
         if (err)
@@ -203,14 +203,24 @@ static int start_server(void)
     return fd;
 }
 
+static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER;
+
 static void *server_thread(void *arg)
 {
     struct sockaddr_storage addr;
     socklen_t len = sizeof(addr);
     int fd = *(int *)arg;
     int client_fd;
+    int err;
+
+    err = listen(fd, 1);
+
+    pthread_mutex_lock(&server_started_mtx);
+    pthread_cond_signal(&server_started);
+    pthread_mutex_unlock(&server_started_mtx);
 
-    if (CHECK_FAIL(listen(fd, 1)) < 0) {
+    if (CHECK_FAIL(err < 0)) {
         perror("Failed to listed on socket");
         return NULL;
     }
@@ -248,7 +258,14 @@ void test_tcp_rtt(void)
     if (CHECK_FAIL(server_fd < 0))
         goto close_cgroup_fd;
 
-    pthread_create(&tid, NULL, server_thread, (void *)&server_fd);
+    if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
+                                  (void *)&server_fd)))
+        goto close_cgroup_fd;
+
+    pthread_mutex_lock(&server_started_mtx);
+    pthread_cond_wait(&server_started, &server_started_mtx);
+    pthread_mutex_unlock(&server_started_mtx);
+
     CHECK_FAIL(run_test(cgroup_fd, server_fd));
     close(server_fd);
 close_cgroup_fd:
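The race here was the test's client running ahead of the server's listen(); the fix blocks the main thread on a condition variable until the server thread has called listen() and signalled readiness. A generic sketch of the handshake, with an explicit ready flag added as a common hardening against a signal arriving before the wait (an addition of this sketch, not something the selftest does):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t started_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t started_cond = PTHREAD_COND_INITIALIZER;
    static bool server_ready;

    static void *server(void *arg)
    {
        /* ... socket(), bind(), listen() ... */
        pthread_mutex_lock(&started_mtx);
        server_ready = true;
        pthread_cond_signal(&started_cond);
        pthread_mutex_unlock(&started_mtx);
        /* ... accept() and serve ... */
        return NULL;
    }

    static void wait_for_server(void)
    {
        pthread_mutex_lock(&started_mtx);
        while (!server_ready)   /* predicate guards against spurious or early wakeups */
            pthread_cond_wait(&started_cond, &started_mtx);
        pthread_mutex_unlock(&started_mtx);
    }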
@@ -413,7 +413,10 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
 #else
 #pragma unroll
 #endif
-    for (int i = 0; i < STROBE_MAX_MAP_ENTRIES && i < map.cnt; ++i) {
+    for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) {
+        if (i >= map.cnt)
+            break;
+
         descr->key_lens[i] = 0;
         len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
                                  map.entries[i].key);
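The loop rewrite appears intended to keep the trip count a compile-time constant (STROBE_MAX_MAP_ENTRIES) for the unroll pragma and the BPF verifier, moving the runtime bound (map.cnt) into an explicit early break. The same transformation in isolation, with illustrative names:

    #define MAX_ENTRIES 16  /* compile-time bound the unroller/verifier can see */

    /* before: mixed static and dynamic bound in the loop condition
     *     for (int i = 0; i < MAX_ENTRIES && i < cnt; ++i) { ... }
     * after: constant bound, dynamic limit as an early break */
    static int process(int cnt)
    {
        int done = 0;

        for (int i = 0; i < MAX_ENTRIES; ++i) {
            if (i >= cnt)
                break;
            done++;     /* stand-in for per-entry work */
        }
        return done;
    }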
@@ -1385,7 +1385,6 @@ static int fixup_sysctl_value(const char *buf, size_t buf_len,
         uint8_t raw[sizeof(uint64_t)];
         uint64_t num;
     } value = {};
-    uint8_t c, i;
 
     if (buf_len > sizeof(value)) {
         log_err("Value is too big (%zd) to use in fixup", buf_len);