Commit 76b5e303 authored by Alexei Starovoitov

Merge branch 'misc-improvements'

Daniel Borkmann says:

====================
Last batch of misc patches I had in queue: first one removes some left-over
bits from ULP, second is a fix in the verifier where we wrongly use register
number as type to fetch the string for the dump, third disables xadd on flow
keys and subsequent one removes the flow key type from check_helper_mem_access()
as they cannot be passed into any helper as of today. Next one lets map push,
pop, peek avoid having to go through retpoline, and last one has a couple of
minor fixes and cleanups for the ring buffer walk.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 2576b967 3dca2115
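
For context on the push/pop/peek point in the series description: a program along the lines of the sketch below is the kind of bpf_map_push_elem() user that benefits from the fixup_bpf_calls() change further down. It is illustrative only and not part of this series; map, section and function names are made up, and the BTF-style map definition assumes a newer libbpf than the one being changed in this tree.

/* Hypothetical tc classifier pushing packet lengths into a queue map.
 * With this series, the bpf_map_push_elem() helper call below can be
 * rewritten by fixup_bpf_calls() into a direct call to the map's ops,
 * so a JITed program no longer goes through a retpoline for it.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 1024);
	__uint(value_size, sizeof(__u32));
} pkt_lens SEC(".maps");

SEC("tc")
int record_pkt_len(struct __sk_buff *skb)
{
	__u32 len = skb->len;

	/* BPF_EXIST: overwrite the oldest entry when the queue is full */
	bpf_map_push_elem(&pkt_lens, &len, BPF_EXIST);
	return 0;
}

char _license[] SEC("license") = "GPL";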
@@ -2051,11 +2051,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
 #define TCP_ULP_MAX		128
 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
 
-enum {
-	TCP_ULP_TLS,
-	TCP_ULP_BPF,
-};
-
 struct tcp_ulp_ops {
 	struct list_head	list;
 
@@ -2064,9 +2059,7 @@ struct tcp_ulp_ops {
 	/* cleanup ulp */
 	void (*release)(struct sock *sk);
 
-	int		uid;
 	char		name[TCP_ULP_NAME_MAX];
-	bool		user_visible;
 	struct module	*owner;
 };
 
 int tcp_register_ulp(struct tcp_ulp_ops *type);
...
@@ -1528,14 +1528,19 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
 	return reg->type != SCALAR_VALUE;
 }
 
+static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+{
+	return cur_regs(env) + regno;
+}
+
 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
 {
-	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
+	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
 }
 
 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 {
-	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+	const struct bpf_reg_state *reg = reg_state(env, regno);
 
 	return reg->type == PTR_TO_CTX ||
 	       reg->type == PTR_TO_SOCKET;
@@ -1543,11 +1548,19 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 
 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
 {
-	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+	const struct bpf_reg_state *reg = reg_state(env, regno);
 
 	return type_is_pkt_pointer(reg->type);
 }
 
+static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = reg_state(env, regno);
+
+	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
+	return reg->type == PTR_TO_FLOW_KEYS;
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1956,9 +1969,11 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 	}
 
 	if (is_ctx_reg(env, insn->dst_reg) ||
-	    is_pkt_reg(env, insn->dst_reg)) {
+	    is_pkt_reg(env, insn->dst_reg) ||
+	    is_flow_key_reg(env, insn->dst_reg)) {
 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
-			insn->dst_reg, reg_type_str[insn->dst_reg]);
+			insn->dst_reg,
+			reg_type_str[reg_state(env, insn->dst_reg)->type]);
 		return -EACCES;
 	}
 
@@ -1983,7 +1998,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 				int access_size, bool zero_size_allowed,
 				struct bpf_call_arg_meta *meta)
 {
-	struct bpf_reg_state *reg = cur_regs(env) + regno;
+	struct bpf_reg_state *reg = reg_state(env, regno);
 	struct bpf_func_state *state = func(env, reg);
 	int off, i, slot, spi;
 
@@ -2062,8 +2077,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 	case PTR_TO_PACKET_META:
 		return check_packet_access(env, regno, reg->off, access_size,
 					   zero_size_allowed);
-	case PTR_TO_FLOW_KEYS:
-		return check_flow_keys_access(env, reg->off, access_size);
 	case PTR_TO_MAP_VALUE:
 		return check_map_access(env, regno, reg->off, access_size,
 					zero_size_allowed);
@@ -5264,7 +5277,8 @@ static int do_check(struct bpf_verifier_env *env)
 			if (is_ctx_reg(env, insn->dst_reg)) {
 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
-					insn->dst_reg, reg_type_str[insn->dst_reg]);
+					insn->dst_reg,
+					reg_type_str[reg_state(env, insn->dst_reg)->type]);
 				return -EACCES;
 			}
 
@@ -6164,7 +6178,10 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
 		     insn->imm == BPF_FUNC_map_update_elem ||
-		     insn->imm == BPF_FUNC_map_delete_elem)) {
+		     insn->imm == BPF_FUNC_map_delete_elem ||
+		     insn->imm == BPF_FUNC_map_push_elem ||
+		     insn->imm == BPF_FUNC_map_pop_elem ||
+		     insn->imm == BPF_FUNC_map_peek_elem)) {
 			aux = &env->insn_aux_data[i + delta];
 			if (bpf_map_ptr_poisoned(aux))
 				goto patch_call_imm;
@@ -6197,6 +6214,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
 				     (int (*)(struct bpf_map *map, void *key, void *value,
 					      u64 flags))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
+				     (int (*)(struct bpf_map *map, void *value,
+					      u64 flags))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
+				     (int (*)(struct bpf_map *map, void *value))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
+				     (int (*)(struct bpf_map *map, void *value))NULL));
+
 			switch (insn->imm) {
 			case BPF_FUNC_map_lookup_elem:
 				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
@@ -6210,6 +6235,18 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
 					    __bpf_call_base;
 				continue;
+			case BPF_FUNC_map_push_elem:
+				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
+					    __bpf_call_base;
+				continue;
+			case BPF_FUNC_map_pop_elem:
+				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
+					    __bpf_call_base;
+				continue;
+			case BPF_FUNC_map_peek_elem:
+				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
+					    __bpf_call_base;
+				continue;
 			}
 
 			goto patch_call_imm;
...
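For context on the new is_flow_key_reg() check: a flow dissector program along the lines of the sketch below (illustrative only, not from this series) would previously have slipped past check_xadd(), which only filtered ctx and packet pointers. With this change the atomic add is rejected with a message like "BPF_XADD stores into R1 flow_keys is not allowed" (the register number depends on how the program compiles).

/* Hypothetical flow dissector program; __sync_fetch_and_add() with an
 * unused result compiles to a BPF_XADD instruction, here targeting the
 * flow keys area, which the verifier now refuses.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("flow_dissector")
int xadd_into_flow_keys(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	/* atomic read-modify-write into flow keys -> rejected by check_xadd() */
	__sync_fetch_and_add(&keys->ipv4_src, 1);

	return BPF_OK;
}

char _license[] SEC("license") = "GPL";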
@@ -715,8 +715,6 @@ EXPORT_SYMBOL(tls_unregister_device);
 static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
 	.name			= "tls",
-	.uid			= TCP_ULP_TLS,
-	.user_visible		= true,
 	.owner			= THIS_MODULE,
 	.init			= tls_init,
 };
...
@@ -50,15 +50,17 @@ static void int_exit(int signo)
 	stop = true;
 }
 
-static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
+static enum bpf_perf_event_ret
+print_bpf_output(struct perf_event_header *event, void *private_data)
 {
-	struct event_ring_info *ring = priv;
-	struct perf_event_sample *e = event;
+	struct perf_event_sample *e = container_of(event, struct perf_event_sample,
+						   header);
+	struct event_ring_info *ring = private_data;
 	struct {
 		struct perf_event_header header;
 		__u64 id;
 		__u64 lost;
-	} *lost = event;
+	} *lost = (typeof(lost))event;
 
 	if (json_output) {
 		jsonw_start_object(json_wtr);
...
@@ -2415,56 +2415,47 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 }
 
 enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mem, unsigned long size,
-			   unsigned long page_size, void **buf, size_t *buf_len,
-			   bpf_perf_event_print_t fn, void *priv)
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+			   void **copy_mem, size_t *copy_size,
+			   bpf_perf_event_print_t fn, void *private_data)
 {
-	struct perf_event_mmap_page *header = mem;
+	struct perf_event_mmap_page *header = mmap_mem;
 	__u64 data_head = ring_buffer_read_head(header);
 	__u64 data_tail = header->data_tail;
-	int ret = LIBBPF_PERF_EVENT_ERROR;
-	void *base, *begin, *end;
-
-	if (data_head == data_tail)
-		return LIBBPF_PERF_EVENT_CONT;
-
-	base = ((char *)header) + page_size;
-
-	begin = base + data_tail % size;
-	end = base + data_head % size;
-
-	while (begin != end) {
-		struct perf_event_header *ehdr;
-
-		ehdr = begin;
-		if (begin + ehdr->size > base + size) {
-			long len = base + size - begin;
-
-			if (*buf_len < ehdr->size) {
-				free(*buf);
-				*buf = malloc(ehdr->size);
-				if (!*buf) {
+	void *base = ((__u8 *)header) + page_size;
+	int ret = LIBBPF_PERF_EVENT_CONT;
+	struct perf_event_header *ehdr;
+	size_t ehdr_size;
+
+	while (data_head != data_tail) {
+		ehdr = base + (data_tail & (mmap_size - 1));
+		ehdr_size = ehdr->size;
+
+		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
+			void *copy_start = ehdr;
+			size_t len_first = base + mmap_size - copy_start;
+			size_t len_secnd = ehdr_size - len_first;
+
+			if (*copy_size < ehdr_size) {
+				free(*copy_mem);
+				*copy_mem = malloc(ehdr_size);
+				if (!*copy_mem) {
+					*copy_size = 0;
 					ret = LIBBPF_PERF_EVENT_ERROR;
 					break;
 				}
-				*buf_len = ehdr->size;
+				*copy_size = ehdr_size;
 			}
 
-			memcpy(*buf, begin, len);
-			memcpy(*buf + len, base, ehdr->size - len);
-			ehdr = (void *)*buf;
-			begin = base + ehdr->size - len;
-		} else if (begin + ehdr->size == base + size) {
-			begin = base;
-		} else {
-			begin += ehdr->size;
+			memcpy(*copy_mem, copy_start, len_first);
+			memcpy(*copy_mem + len_first, base, len_secnd);
+			ehdr = *copy_mem;
 		}
 
-		ret = fn(ehdr, priv);
+		ret = fn(ehdr, private_data);
+		data_tail += ehdr_size;
 		if (ret != LIBBPF_PERF_EVENT_CONT)
 			break;
-
-		data_tail += ehdr->size;
 	}
 
 	ring_buffer_write_tail(header, data_tail);
...
@@ -297,13 +297,14 @@ enum bpf_perf_event_ret {
 	LIBBPF_PERF_EVENT_CONT = -2,
 };
 
-typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(void *event,
-							   void *priv);
-LIBBPF_API int bpf_perf_event_read_simple(void *mem, unsigned long size,
-					  unsigned long page_size,
-					  void **buf, size_t *buf_len,
-					  bpf_perf_event_print_t fn,
-					  void *priv);
+struct perf_event_header;
+typedef enum bpf_perf_event_ret
+	(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+				  void *private_data);
+LIBBPF_API enum bpf_perf_event_ret
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+			   void **copy_mem, size_t *copy_size,
+			   bpf_perf_event_print_t fn, void *private_data);
 
 struct nlattr;
 typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
...
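The reworked helper is easiest to see from the caller's side. A minimal consumer sketch follows (all local names are illustrative, setup and teardown via perf_event_open()/mmap() are omitted, and the <bpf/libbpf.h> include path assumes a packaged libbpf): mmap_mem is the mmap() base including the control page, mmap_size is the power-of-two size of the data pages only, and copy_mem/copy_size point to a caller-owned bounce buffer that the helper grows on demand for records that wrap around the end of the ring.

/* Sketch of a consumer for the new bpf_perf_event_read_simple() API. */
#include <linux/perf_event.h>
#include <bpf/libbpf.h>
#include <stddef.h>

static enum bpf_perf_event_ret
handle_event(struct perf_event_header *hdr, void *private_data)
{
	/* hdr points into the ring, or into the bounce buffer when the
	 * record wrapped around the end of the mmap'ed data area.
	 */
	if (hdr->type == PERF_RECORD_SAMPLE) {
		/* consume the sample payload here */
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static void *copy_mem;		/* bounce buffer, grown by the helper */
static size_t copy_size;

int drain_ring(void *mmap_mem, size_t data_size, size_t page_size)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(mmap_mem, data_size, page_size,
					 &copy_mem, &copy_size,
					 handle_event, NULL);
	return ret == LIBBPF_PERF_EVENT_ERROR ? -1 : 0;
}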
@@ -3430,7 +3430,7 @@ static struct bpf_test tests[] = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "BPF_ST stores into R1 inv is not allowed",
+		.errstr = "BPF_ST stores into R1 ctx is not allowed",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
@@ -3442,7 +3442,7 @@ static struct bpf_test tests[] = {
 				   BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "BPF_XADD stores into R1 inv is not allowed",
+		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
@@ -5670,7 +5670,7 @@ static struct bpf_test tests[] = {
 		.errstr_unpriv = "R2 leaks addr into mem",
 		.result_unpriv = REJECT,
 		.result = REJECT,
-		.errstr = "BPF_XADD stores into R1 inv is not allowed",
+		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
 	},
 	{
 		"leak pointer into ctx 2",
@@ -5685,7 +5685,7 @@ static struct bpf_test tests[] = {
 		.errstr_unpriv = "R10 leaks addr into mem",
 		.result_unpriv = REJECT,
 		.result = REJECT,
-		.errstr = "BPF_XADD stores into R1 inv is not allowed",
+		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
 	},
 	{
 		"leak pointer into ctx 3",
@@ -12634,7 +12634,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = REJECT,
-		.errstr = "BPF_XADD stores into R2 ctx",
+		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
 	{
...
@@ -125,10 +125,11 @@ struct perf_event_sample {
 	char data[];
 };
 
-static enum bpf_perf_event_ret bpf_perf_event_print(void *event, void *priv)
+static enum bpf_perf_event_ret
+bpf_perf_event_print(struct perf_event_header *hdr, void *private_data)
 {
-	struct perf_event_sample *e = event;
-	perf_event_print_fn fn = priv;
+	struct perf_event_sample *e = (struct perf_event_sample *)hdr;
+	perf_event_print_fn fn = private_data;
 	int ret;
 
 	if (e->header.type == PERF_RECORD_SAMPLE) {
...