Commit a5f7b0ee authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-02-28

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Add schedule points and reduce the number of loop iterations
   the test_bpf kernel module is performing in order to not hog
   the CPU for too long, from Eric.

2) Fix an out of bounds access in tail calls in the ppc64 BPF
   JIT compiler, from Daniel.

3) Fix a crash on arm64 on unaligned BPF xadd operations that
   could be triggered via interpreter and JIT, from Daniel.

Please note that once you merge net into net-next at some point, there
is a minor merge conflict in test_verifier.c since test cases had
been added at the end in both trees. Resolution is trivial: keep all
the test cases from both trees.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a6d50512 9960d766
@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	 *   goto out;
 	 */
 	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 	PPC_BCC(COND_GE, out);
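For readers unfamiliar with the ppc64 mnemonics: the added PPC_RLWINM clears the upper 32 bits of the tail-call index register. The bound check (PPC_CMPLW) compares only the low word, so without the zero-extension an index with stale upper bits could pass the check and still index the tail-call array out of bounds when the full 64-bit register is used later. A minimal C sketch of the intended semantics follows; the function and variable names are illustrative, not the JIT's actual code.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: bpf_tail_call() defines the index as a 32-bit
 * value, so the JIT must zero-extend the register before it is used
 * for both the bound check and the array access.
 */
static bool tail_call_index_ok(uint64_t index_reg, uint32_t max_entries)
{
	uint32_t index = (uint32_t)index_reg;	/* what PPC_RLWINM(reg, reg, 0, 0, 31) achieves */

	return index < max_entries;		/* PPC_CMPLW + PPC_BCC(COND_GE, out) */
}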
@@ -1356,6 +1356,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 	return reg->type == PTR_TO_CTX;
 }
 
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+	return type_is_pkt_pointer(reg->type);
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1416,10 +1423,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
 }
 
 static int check_ptr_alignment(struct bpf_verifier_env *env,
-			       const struct bpf_reg_state *reg,
-			       int off, int size)
+			       const struct bpf_reg_state *reg, int off,
+			       int size, bool strict_alignment_once)
 {
-	bool strict = env->strict_alignment;
+	bool strict = env->strict_alignment || strict_alignment_once;
 	const char *pointer_desc = "";
 
 	switch (reg->type) {
@@ -1576,9 +1583,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
-			    int bpf_size, enum bpf_access_type t,
-			    int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+			    int off, int bpf_size, enum bpf_access_type t,
+			    int value_regno, bool strict_alignment_once)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
@@ -1590,7 +1597,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		return size;
 
 	/* alignment checks will add in reg->off themselves */
-	err = check_ptr_alignment(env, reg, off, size);
+	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
 	if (err)
 		return err;
@@ -1735,21 +1742,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 		return -EACCES;
 	}
 
-	if (is_ctx_reg(env, insn->dst_reg)) {
-		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
-			insn->dst_reg);
+	if (is_ctx_reg(env, insn->dst_reg) ||
+	    is_pkt_reg(env, insn->dst_reg)) {
+		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+			"context" : "packet");
 		return -EACCES;
 	}
 
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_READ, -1);
+			       BPF_SIZE(insn->code), BPF_READ, -1, true);
 	if (err)
 		return err;
 
 	/* check whether atomic_add can write into the same memory */
 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_WRITE, -1);
+			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
 
 /* when register 'regno' is passed into function that will read 'access_size'
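Two things change for BPF_XADD in this hunk: packet memory is no longer a legal destination at all, and the remaining cases pass strict_alignment_once=true so that the read and write checks enforce natural alignment even on kernels built for efficient unaligned access. The reason is that the interpreter and the JITs implement BPF_XADD with real atomic operations, which fault on misaligned addresses on arm64 and other architectures. A rough userspace analogue of that failure mode, not kernel code, with all names chosen for illustration:

#include <stdint.h>

int main(void)
{
	/* An 8-byte aligned buffer with a 4-byte value placed at an odd offset. */
	char buf[8] __attribute__((aligned(8))) = { 0 };
	uint32_t *misaligned = (uint32_t *)(buf + 1);

	/* Plain loads and stores of *misaligned are normally handled by the
	 * hardware, but the exclusive/LSE atomics arm64 uses for an atomic
	 * add require natural alignment and can fault here.
	 */
	__atomic_fetch_add(misaligned, 1, __ATOMIC_RELAXED);
	return 0;
}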
@@ -2388,7 +2397,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	 * is inferred from register state.
 	 */
 	for (i = 0; i < meta.access_size; i++) {
-		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+				       BPF_WRITE, -1, false);
 		if (err)
 			return err;
 	}
@@ -4632,7 +4642,7 @@ static int do_check(struct bpf_verifier_env *env)
 		 */
 		err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 				       BPF_SIZE(insn->code), BPF_READ,
-				       insn->dst_reg);
+				       insn->dst_reg, false);
 		if (err)
 			return err;
@@ -4684,7 +4694,7 @@ static int do_check(struct bpf_verifier_env *env)
 		/* check that memory (dst_reg + off) is writeable */
 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 				       BPF_SIZE(insn->code), BPF_WRITE,
-				       insn->src_reg);
+				       insn->src_reg, false);
 		if (err)
 			return err;
@@ -4719,7 +4729,7 @@ static int do_check(struct bpf_verifier_env *env)
 		/* check that memory (dst_reg + off) is writeable */
 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 				       BPF_SIZE(insn->code), BPF_WRITE,
-				       -1);
+				       -1, false);
 		if (err)
 			return err;
@@ -24,10 +24,11 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS	3
-#define MAX_TESTRUNS	10000
+#define MAX_TESTRUNS	1000
 #define MAX_DATA	128
 #define MAX_INSNS	512
 #define MAX_K		0xffffFFFF
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void)
 		struct bpf_prog *fp;
 		int err;
 
+		cond_resched();
 		if (exclude_test(i))
 			continue;
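The test_bpf change is purely about CPU time: every test program is executed repeatedly (up to MAX_TESTRUNS times per subtest), so lowering the run count and yielding between tests keeps the module from monopolizing a CPU on non-preemptible kernels. A simplified sketch of the resulting loop shape; run_all_tests(), run_one_test() and ntests are placeholders, not symbols from lib/test_bpf.c:

#include <linux/kernel.h>
#include <linux/sched.h>	/* cond_resched() */

static int run_all_tests(int ntests, int (*run_one_test)(int idx))
{
	int i, err;

	for (i = 0; i < ntests; i++) {
		cond_resched();		/* give the scheduler a chance between tests */
		err = run_one_test(i);	/* each test may loop up to MAX_TESTRUNS times */
		if (err)
			return err;
	}
	return 0;
}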
@@ -11163,6 +11163,64 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 	},
+	{
+		"xadd/w check unaligned stack",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned stack access off",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"xadd/w check unaligned map",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = REJECT,
+		.errstr = "misaligned value access off",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"xadd/w check unaligned pkt",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct xdp_md, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct xdp_md, data_end)),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 99),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "BPF_XADD stores into R2 packet",
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
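The three new selftests cover the stack, map-value and packet cases from the verifier change above. As a point of reference, BPF_STX_XADD is what LLVM's BPF backend emits for __sync_fetch_and_add() in restricted C, so a program along the following lines would now be refused at load time with a misaligned-access error instead of crashing at runtime. This is a hedged sketch; the section name, function name and variable layout are illustrative rather than taken from the selftest.

#include <linux/bpf.h>

/* Sketch only: a 4-byte atomic add aimed at an unaligned stack slot.
 * After this fix the verifier rejects the program ("misaligned stack
 * access off ...").
 */
__attribute__((section("classifier"), used))
int unaligned_xadd(struct __sk_buff *skb)
{
	__u64 slot = 1;

	/* compiles to a BPF_STX_XADD of size BPF_W at a 1-byte offset */
	__sync_fetch_and_add((__u32 *)((char *)&slot + 1), 1);
	return (int)slot;
}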