Commit 3d27484e authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-03-21

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Follow-up fix to the fault injection framework to prevent jump
   optimization on the kprobe by installing a dummy post-handler,
   from Masami.

2) Drop the bpf_perf_prog_read_value helper from tracepoint type programs;
   it was mistakenly added there and would otherwise crash due to the
   wrong input context, from Yonghong.

3) Fix a crash in BPF fs when compiled with clang. The code itself is
   fine; clang merely optimizes it in an overly aggressive way that does
   not conform to C, so fix the kernel's Makefile to prevent such issues
   in general, from Daniel.

4) Skip unnecessary capability checks in the bpf syscall, which otherwise
   trigger needless security hooks for capability checking and cause
   false alarms about unprivileged processes trying to access
   CAP_SYS_ADMIN restricted infra, from Chenbo.

5) Fix the test_bpf.ko module when CONFIG_BPF_JIT_ALWAYS_ON is set
   with regard to a test case that is really only supposed to fail
   on the x86_64 JIT but not on others, from Thadeu.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5f2fb802 87e0d4f0
@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
 
+# clang sets -fmerge-all-constants by default as optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
+
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
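The reason -fmerge-all-constants is dangerous here is that C requires
distinct objects to have distinct addresses, and kernel code may use the
address of a static const object as an identity token. A hypothetical
userspace sketch (not the actual BPF fs code that crashed) of the kind of
assumption the optimization breaks:

	/* Compile once with -fmerge-all-constants and once without: the
	 * standard guarantees tag_a != tag_b, so main() must return 0. */
	static const char tag_a[] = "tag";
	static const char tag_b[] = "tag";

	static int is_tag_a(const char *p)
	{
		return p == tag_a;
	}

	int main(void)
	{
		return is_tag_a(tag_b);	/* must be 0 per the C standard */
	}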
@@ -1845,7 +1845,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	union bpf_attr attr = {};
 	int err;
 
-	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
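The swap relies on C's short-circuit evaluation: capable() is not a pure
predicate, since the LSM capability hook behind it can record an audit
denial even when the overall bpf() call would succeed anyway. Checking the
sysctl first means the hook only runs when its result can actually matter.
A self-contained illustration with hypothetical names (policy_enabled and
privileged_check() stand in for the sysctl and capable()):

	#include <stdbool.h>
	#include <errno.h>

	static bool policy_enabled;	/* like sysctl_unprivileged_bpf_disabled */

	static bool privileged_check(void)
	{
		/* imagine an LSM/audit hook firing here as a side effect */
		return false;
	}

	static int do_syscall(void)
	{
		/* The old order ran privileged_check(), and its side effect, on
		 * every call; this order runs it only when policy_enabled. */
		if (policy_enabled && !privileged_check())
			return -EPERM;
		return 0;
	}

	int main(void)
	{
		return do_syscall();
	}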
@@ -14,6 +14,15 @@
 
 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
 
+static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs,
+			     unsigned long flags)
+{
+	/*
+	 * A dummy post handler is required to prohibit optimizing, because
+	 * jump optimization does not support execution path overriding.
+	 */
+}
+
 struct fei_attr {
 	struct list_head list;
 	struct kprobe kp;
@@ -56,6 +65,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
 			return NULL;
 		}
 		attr->kp.pre_handler = fei_kprobe_handler;
+		attr->kp.post_handler = fei_post_handler;
 		attr->retval = adjust_error_retval(addr, 0);
 		INIT_LIST_HEAD(&attr->list);
 	}
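Why a do-nothing post handler helps: a kprobe with any post_handler is
never converted to a jump-optimized probe, and only the non-optimized,
breakpoint-based probe lets the pre_handler override the execution path by
rewriting regs->ip, which is what fei_kprobe_handler does through
override_function_with_return(). A minimal standalone sketch of the same
pattern (hypothetical module and target symbol, error handling omitted;
assumes an architecture with CONFIG_FUNCTION_ERROR_INJECTION):

	#include <linux/module.h>
	#include <linux/kprobes.h>
	#include <linux/error-injection.h>

	static int demo_pre(struct kprobe *kp, struct pt_regs *regs)
	{
		regs_set_return_value(regs, -EINVAL);	/* forced error return */
		override_function_with_return(regs);	/* skip the probed body */
		/* kprobe-specific fixup, as in fei_kprobe_handler, because we
		 * changed regs->ip and return 1 */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	static void demo_post(struct kprobe *kp, struct pt_regs *regs,
			      unsigned long flags)
	{
		/* intentionally empty: its presence alone keeps the kprobe off
		 * the jump-optimized path */
	}

	static struct kprobe demo_kp = {
		.symbol_name	= "target_function",	/* hypothetical target */
		.pre_handler	= demo_pre,
		.post_handler	= demo_post,
	};

	static int __init demo_init(void)
	{
		return register_kprobe(&demo_kp);
	}

	static void __exit demo_exit(void)
	{
		unregister_kprobe(&demo_kp);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");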
@@ -661,7 +661,41 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+				    struct bpf_insn_access_aux *info)
+{
+	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
+	return true;
+}
+
+const struct bpf_verifier_ops tracepoint_verifier_ops = {
+	.get_func_proto  = tp_prog_func_proto,
+	.is_valid_access = tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracepoint_prog_ops = {
+};
+
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
 	   struct bpf_perf_event_value *, buf, u32, size)
 {
 	int err = -EINVAL;
@@ -678,8 +712,8 @@ BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
 	return err;
 }
 
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
-	.func		= bpf_perf_prog_read_value_tp,
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+	.func		= bpf_perf_prog_read_value,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
@@ -687,7 +721,7 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
 	.arg3_type	= ARG_CONST_SIZE,
 };
 
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
@@ -695,34 +729,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
-		return &bpf_perf_prog_read_value_proto_tp;
+		return &bpf_perf_prog_read_value_proto;
 	default:
 		return tracing_func_proto(func_id);
 	}
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    struct bpf_insn_access_aux *info)
-{
-	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-
-	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
-	return true;
-}
-
-const struct bpf_verifier_ops tracepoint_verifier_ops = {
-	.get_func_proto  = tp_prog_func_proto,
-	.is_valid_access = tp_prog_is_valid_access,
-};
-
-const struct bpf_prog_ops tracepoint_prog_ops = {
-};
-
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -779,7 +791,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_verifier_ops perf_event_verifier_ops = {
-	.get_func_proto		= tp_prog_func_proto,
+	.get_func_proto		= pe_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
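The helper casts its context to struct bpf_perf_event_data_kern and hands
the event pointer behind it to perf_event_read_local(), which only makes
sense for BPF_PROG_TYPE_PERF_EVENT programs; a tracepoint program's
context is just the raw trace record, so exposing the helper there made it
dereference garbage. After the split, only pe_prog_func_proto returns it.
An illustrative-only perf_event program in the samples/bpf style of this
era (section name, bpf_helpers.h declarations and program body are
assumptions, not part of this commit):

	#include <uapi/linux/bpf.h>
	#include <uapi/linux/bpf_perf_event.h>
	#include "bpf_helpers.h"

	SEC("perf_event")
	int count_samples(struct bpf_perf_event_data *ctx)
	{
		struct bpf_perf_event_value value = {};

		/* Valid here: ctx is really backed by bpf_perf_event_data_kern,
		 * so the helper can find the perf event behind it. */
		if (bpf_perf_prog_read_value(ctx, &value, sizeof(value)) != 0)
			return 0;

		/* value.counter / value.enabled / value.running usable here */
		return 0;
	}

	char _license[] SEC("license") = "GPL";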
@@ -5467,7 +5467,7 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
 		CLASSIC | FLAG_NO_DATA,