Commit e002434e authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-05-03

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Several BPF sockmap fixes, mostly related to bugs in error path
   handling: a bug in the scatterlist length / offset accounting, a
   missing sk_mem_uncharge() in the redirect error path, and a bug
   where the outstanding bytes counter sg_size was not zeroed, from
   John.

2) Fix two memory leaks in the x86-64 BPF JIT: one in an error path
   where the JIT still does not converge after the image has been
   allocated, and another where BPF calls are used and the JIT passes
   do not converge, from Daniel.

3) Minor fixes in BPF selftests: in test_stacktrace_build_id(), drop
   the useless arguments passed to urandom_read and add a missing
   newline to a CHECK() error message, from Song.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7df40c26 b5b6ff73
@@ -1236,6 +1236,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         for (pass = 0; pass < 20 || image; pass++) {
                 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                 if (proglen <= 0) {
+out_image:
                         image = NULL;
                         if (header)
                                 bpf_jit_binary_free(header);
@@ -1246,8 +1247,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                         if (proglen != oldproglen) {
                                 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
                                        proglen, oldproglen);
-                                prog = orig_prog;
-                                goto out_addrs;
+                                goto out_image;
                         }
                         break;
                 }
@@ -1283,7 +1283,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                 prog = orig_prog;
         }
 
-        if (!prog->is_func || extra_pass) {
+        if (!image || !prog->is_func || extra_pass) {
 out_addrs:
                 kfree(addrs);
                 kfree(jit_data);
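
For context on the JIT fix, here is a standalone sketch (plain userspace C, not the kernel code; emit_pass(), jit_compile(), MAX_PASSES and the buffer handling are invented for illustration) of the convergence-loop pattern involved: once the output image has been allocated, any later emit failure or size mismatch must release both the image and the per-instruction offset array, which is the role a single out_image error path plays.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define MAX_PASSES 20  /* hypothetical pass limit, mirrors the "< 20" bound */

  /* Hypothetical one-pass code generator: returns the emitted length,
   * or a non-positive value on failure. Writes into buf when non-NULL. */
  static int emit_pass(const char *prog, char *buf, int buflen)
  {
          int len = (int)strlen(prog) + 1;

          if (buf && len <= buflen)
                  memcpy(buf, prog, len);
          return len;
  }

  static char *jit_compile(const char *prog, int *lenp)
  {
          int *addrs = calloc(strlen(prog) + 1, sizeof(*addrs)); /* per-insn offsets */
          int proglen, oldproglen = 0, pass;
          char *image = NULL;

          if (!addrs)
                  return NULL;

          for (pass = 0; pass < MAX_PASSES || image; pass++) {
                  proglen = emit_pass(prog, image, oldproglen);
                  if (proglen <= 0)
                          goto out_image;          /* emit failure */
                  if (image) {
                          if (proglen != oldproglen)
                                  goto out_image;  /* no convergence after alloc */
                          break;                   /* stable: done */
                  }
                  if (proglen == oldproglen) {
                          image = malloc(proglen); /* sizes converged: allocate once */
                          if (!image)
                                  goto out_image;
                  }
                  oldproglen = proglen;
          }

          free(addrs);
          *lenp = oldproglen;
          return image;

  out_image:
          free(image);   /* both resources are released on every error path */
          free(addrs);
          return NULL;
  }

  int main(void)
  {
          int len = 0;
          char *img = jit_compile("mov r0, 1; exit", &len);

          printf("jit image: %s (%d bytes)\n", img ? img : "(failed)", len);
          free(img);
          return 0;
  }
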
@@ -326,6 +326,9 @@ static int bpf_tcp_push(struct sock *sk, int apply_bytes,
                         if (ret > 0) {
                                 if (apply)
                                         apply_bytes -= ret;
+
+                                sg->offset += ret;
+                                sg->length -= ret;
                                 size -= ret;
                                 offset += ret;
                                 if (uncharge)
@@ -333,8 +336,6 @@ static int bpf_tcp_push(struct sock *sk, int apply_bytes,
                                 goto retry;
                         }
 
-                        sg->length = size;
-                        sg->offset = offset;
                         return ret;
                 }
 
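
To make the scatterlist accounting change above concrete, the following userspace sketch (send_some(), struct seg and push_seg() are made-up stand-ins, not the kernel's do_tcp_sendpages() or scatterlist API) keeps the descriptor's offset and length up to date inside the retry loop itself, so that when the send eventually fails the caller already sees exactly how much data remains unsent.

  #include <stdio.h>

  /* Minimal stand-in for a scatterlist element. */
  struct seg {
          unsigned int offset;
          unsigned int length;
  };

  /* Hypothetical partial-send primitive: pushes at most 3 bytes per call
   * and gives up (-1) once an arbitrary 7-byte budget is exhausted. */
  static int send_some(unsigned int offset, unsigned int len)
  {
          static unsigned int budget = 7;
          unsigned int n = len < 3 ? len : 3;

          (void)offset;
          if (budget == 0)
                  return -1;
          if (n > budget)
                  n = budget;
          budget -= n;
          return (int)n;
  }

  /* Push one segment, updating the descriptor as bytes are consumed so
   * any caller (including error paths) sees consistent offset/length. */
  static int push_seg(struct seg *sg)
  {
          int ret;

  retry:
          ret = send_some(sg->offset, sg->length);
          if (ret > 0 && (unsigned int)ret != sg->length) {
                  sg->offset += ret;   /* account progress immediately ... */
                  sg->length -= ret;   /* ... not only on the success path */
                  goto retry;
          }
          return ret;
  }

  int main(void)
  {
          struct seg sg = { .offset = 0, .length = 10 };
          int ret = push_seg(&sg);

          printf("ret=%d, unsent: offset=%u length=%u\n",
                 ret, sg.offset, sg.length);
          return 0;
  }
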
@@ -392,7 +393,8 @@ static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
         } while (i != md->sg_end);
 }
 
-static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
+static void free_bytes_sg(struct sock *sk, int bytes,
+                          struct sk_msg_buff *md, bool charge)
 {
         struct scatterlist *sg = md->sg_data;
         int i = md->sg_start, free;
@@ -402,10 +404,12 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
                 if (bytes < free) {
                         sg[i].length -= bytes;
                         sg[i].offset += bytes;
-                        sk_mem_uncharge(sk, bytes);
+                        if (charge)
+                                sk_mem_uncharge(sk, bytes);
                         break;
                 }
 
-                sk_mem_uncharge(sk, sg[i].length);
+                if (charge)
+                        sk_mem_uncharge(sk, sg[i].length);
                 put_page(sg_page(&sg[i]));
                 bytes -= sg[i].length;
@@ -417,6 +421,7 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
                 if (i == MAX_SKB_FRAGS)
                         i = 0;
         }
+        md->sg_start = i;
 }
 
 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
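
A loose userspace model of the reworked free_bytes_sg() above (struct ring, struct entry and the uncharge() counter are invented for the sketch; the real function walks scatterlist pages and socket memory accounting): it releases up to a requested number of bytes from a ring of segments, optionally skipping the memory-uncharge step so the same helper can be reused on paths where nothing was charged, and it records where it stopped so the ring's start index stays consistent.

  #include <stdio.h>

  #define RING_SIZE 4   /* stands in for MAX_SKB_FRAGS */

  struct entry {
          unsigned int offset;
          unsigned int length;
  };

  struct ring {
          struct entry e[RING_SIZE];
          int start;
  };

  static unsigned int charged;   /* pretend socket memory accounting */

  static void uncharge(unsigned int bytes)
  {
          charged -= bytes;
  }

  /* Free up to 'bytes' from the ring, optionally uncharging them, and
   * advance ring->start past fully released entries. */
  static void free_bytes(struct ring *r, unsigned int bytes, int charge)
  {
          int i = r->start;

          while (bytes && r->e[i].length) {
                  unsigned int avail = r->e[i].length;

                  if (bytes < avail) {
                          r->e[i].length -= bytes;
                          r->e[i].offset += bytes;
                          if (charge)
                                  uncharge(bytes);
                          break;
                  }

                  if (charge)
                          uncharge(avail);
                  bytes -= avail;
                  r->e[i].length = 0;
                  r->e[i].offset = 0;
                  if (++i == RING_SIZE)
                          i = 0;
          }
          r->start = i;   /* keep the ring's start index consistent */
  }

  int main(void)
  {
          struct ring r = { .e = { { 0, 5 }, { 0, 5 }, { 0, 5 } }, .start = 0 };

          charged = 15;
          free_bytes(&r, 7, 1);    /* charged case: uncharges as it frees */
          printf("start=%d charged=%u remaining[1]=%u\n",
                 r.start, charged, r.e[1].length);

          free_bytes(&r, 3, 0);    /* error-path case: skip the uncharge */
          printf("start=%d charged=%u remaining[1]=%u\n",
                 r.start, charged, r.e[1].length);
          return 0;
  }
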
@@ -575,10 +580,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                                        struct sk_msg_buff *md,
                                        int flags)
 {
+        bool ingress = !!(md->flags & BPF_F_INGRESS);
         struct smap_psock *psock;
         struct scatterlist *sg;
-        int i, err, free = 0;
-        bool ingress = !!(md->flags & BPF_F_INGRESS);
+        int err = 0;
 
         sg = md->sg_data;
 
@@ -606,16 +611,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
 out_rcu:
         rcu_read_unlock();
 out:
-        i = md->sg_start;
-        while (sg[i].length) {
-                free += sg[i].length;
-                put_page(sg_page(&sg[i]));
-                sg[i].length = 0;
-                i++;
-                if (i == MAX_SKB_FRAGS)
-                        i = 0;
-        }
-        return free;
+        free_bytes_sg(NULL, send, md, false);
+        return err;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -700,19 +697,26 @@ static int bpf_exec_tx_verdict(struct smap_psock *psock,
                 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
                 lock_sock(sk);
 
+                if (unlikely(err < 0)) {
+                        free_start_sg(sk, m);
+                        psock->sg_size = 0;
+                        if (!cork)
+                                *copied -= send;
+                } else {
+                        psock->sg_size -= send;
+                }
+
                 if (cork) {
                         free_start_sg(sk, m);
+                        psock->sg_size = 0;
                         kfree(m);
                         m = NULL;
+                        err = 0;
                 }
-                if (unlikely(err))
-                        *copied -= err;
-                else
-                        psock->sg_size -= send;
                 break;
         case __SK_DROP:
         default:
-                free_bytes_sg(sk, send, m);
+                free_bytes_sg(sk, send, m, true);
                 apply_bytes_dec(psock, send);
                 *copied -= send;
                 psock->sg_size -= send;
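
The error handling added to bpf_exec_tx_verdict() above follows a pattern the short sketch below tries to capture (do_redirect(), redirect_and_account() and struct psock_state are placeholders, not the kernel API): on a failed redirect the outstanding-bytes counter is zeroed and the caller's copied count is rolled back unless the data was corked, while a successful send simply subtracts what went out; corked data is released and the error is swallowed there.

  #include <stdbool.h>
  #include <stdio.h>

  /* Placeholder state for the sketch; not the kernel structures. */
  struct psock_state {
          unsigned int sg_size;   /* bytes still queued for this socket */
  };

  /* Hypothetical redirect primitive: fails when asked to send too much. */
  static int do_redirect(unsigned int send)
  {
          return send > 100 ? -1 : 0;
  }

  /* Mirrors the shape of the fixed error handling: roll back accounting
   * on failure, only count bytes as copied when they really went out. */
  static int redirect_and_account(struct psock_state *ps, unsigned int send,
                                  bool cork, unsigned int *copied)
  {
          int err = do_redirect(send);

          if (err < 0) {
                  /* on failure the queued bytes are abandoned: zero the
                   * outstanding counter (the kernel also frees the buffers) */
                  ps->sg_size = 0;
                  if (!cork)
                          *copied -= send;
          } else {
                  ps->sg_size -= send;
          }

          if (cork) {
                  /* corked data is released here in either case */
                  ps->sg_size = 0;
                  err = 0;
          }
          return err;
  }

  int main(void)
  {
          struct psock_state ps = { .sg_size = 150 };
          unsigned int copied = 150;

          int err = redirect_and_account(&ps, 150, false, &copied);
          printf("err=%d sg_size=%u copied=%u\n", err, ps.sg_size, copied);
          return 0;
  }
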
@@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file)
 
 static int cmd_load(char *arg)
 {
-        char *subcmd, *cont, *tmp = strdup(arg);
+        char *subcmd, *cont = NULL, *tmp = strdup(arg);
         int ret = CMD_OK;
 
         subcmd = strtok_r(tmp, " ", &cont);
@@ -1073,6 +1073,9 @@ static int cmd_load(char *arg)
                 bpf_reset();
                 bpf_reset_breakpoints();
 
-                ret = cmd_load_bpf(cont);
+                if (!cont)
+                        ret = CMD_ERR;
+                else
+                        ret = cmd_load_bpf(cont);
         } else if (matches(subcmd, "pcap") == 0) {
                 ret = cmd_load_pcap(cont);
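
The bpf_dbg change above initializes the strtok_r() continuation pointer and checks it before use, so a "load bpf" command with no argument is rejected instead of being passed along as a bad pointer. A minimal standalone version of the same defensive pattern follows (handle_load(), load_bpf() and the use of strcmp() in place of the tool's matches() helper are invented for the sketch):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define CMD_OK   0
  #define CMD_ERR -1

  /* Hypothetical handler; the real tool would load a BPF filter here. */
  static int load_bpf(const char *spec)
  {
          printf("loading: %s\n", spec);
          return CMD_OK;
  }

  /* Parse "bpf <spec>"-style input defensively: the continuation pointer
   * starts out NULL and is validated before use, so input with a missing
   * argument cannot reach the handler. */
  static int handle_load(const char *arg)
  {
          char *subcmd, *cont = NULL, *tmp = strdup(arg);
          int ret = CMD_OK;

          if (!tmp)
                  return CMD_ERR;

          subcmd = strtok_r(tmp, " ", &cont);
          if (subcmd && strcmp(subcmd, "bpf") == 0) {
                  if (!cont || !*cont)
                          ret = CMD_ERR;       /* "load bpf" without an argument */
                  else
                          ret = load_bpf(cont);
          } else {
                  ret = CMD_ERR;
          }

          free(tmp);
          return ret;
  }

  int main(void)
  {
          printf("%d\n", handle_load("bpf 1,6 0 0 0"));
          printf("%d\n", handle_load("bpf"));   /* rejected instead of crashing */
          return 0;
  }
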
@@ -1108,7 +1108,7 @@ static void test_stacktrace_build_id(void)
         assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
                == 0);
-        assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0);
+        assert(system("./urandom_read") == 0);
 
         /* disable stack trace collection */
         key = 0;
         val = 1;
@@ -1158,7 +1158,7 @@ static void test_stacktrace_build_id(void)
         } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
         CHECK(build_id_matches < 1, "build id match",
-              "Didn't find expected build ID from the map");
+              "Didn't find expected build ID from the map\n");
 
 disable_pmu:
         ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);