Commit f9e0ce3d authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2020-05-29

The following pull-request contains BPF updates for your *net* tree.

We've added 6 non-merge commits during the last 7 day(s) which contain
a total of 4 files changed, 55 insertions(+), 34 deletions(-).

The main changes are:

1) minor verifier fix for fmod_ret progs, from Alexei.

2) af_xdp overflow check, from Bjorn.

3) minor verifier fix for 32bit assignment, from John.

4) powerpc has non-overlapping addr space, from Petr.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 942110fd cf66c29b
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -126,6 +126,7 @@ config PPC
 	select ARCH_HAS_MMIOWB			if PPC64
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PTE_DEVMAP		if PPC_BOOK3S_64
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
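For context: powerpc can select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE because its kernel and user addresses never overlap, so generic code can tell the two apart by the address value alone; that property is what the restricted bpf_probe_read{,str}() helpers rely on. A minimal user-space sketch of the idea (TASK_SIZE_MAX and addr_is_kernel() are illustrative stand-ins, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: with non-overlapping address spaces, a single
 * range test distinguishes kernel from user pointers, so a helper
 * handed an address of unknown origin can still pick a safe access
 * method. TASK_SIZE_MAX is an assumed stand-in for the split point. */
#define TASK_SIZE_MAX 0x0000800000000000ULL

static bool addr_is_kernel(uint64_t addr)
{
	return addr >= TASK_SIZE_MAX;
}

int main(void)
{
	return addr_is_kernel(0xffffffff00000000ULL) ? 0 : 1;
}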
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1168,14 +1168,14 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
 	 * but must be positive otherwise set to worse case bounds
 	 * and refine later from tnum.
 	 */
-	if (reg->s32_min_value > 0)
-		reg->smin_value = reg->s32_min_value;
-	else
-		reg->smin_value = 0;
-	if (reg->s32_max_value > 0)
+	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
 		reg->smax_value = reg->s32_max_value;
 	else
 		reg->smax_value = U32_MAX;
+	if (reg->s32_min_value >= 0)
+		reg->smin_value = reg->s32_min_value;
+	else
+		reg->smin_value = 0;
 }
 
 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
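The effect of this change is easiest to see for a register whose 32-bit signed bounds are exactly [0, 0], the wA = 0, wB = wA case exercised by the selftest added at the end of this series: the old strict '> 0' tests widened the 64-bit signed max to U32_MAX even though the register was provably zero, which made the verifier reject valid pointer arithmetic. A stand-alone sketch of the two versions of the logic (user-space C; struct and function names are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define U32_MAX 0xffffffffU

struct bounds { int64_t smin, smax; };

static struct bounds assign_old(int32_t s32_min, int32_t s32_max)
{
	struct bounds b;

	b.smin = s32_min > 0 ? s32_min : 0;
	b.smax = s32_max > 0 ? s32_max : U32_MAX; /* [0,0] widens to [0, U32_MAX] */
	return b;
}

static struct bounds assign_new(int32_t s32_min, int32_t s32_max)
{
	struct bounds b;

	b.smax = (s32_min >= 0 && s32_max >= 0) ? s32_max : U32_MAX;
	b.smin = s32_min >= 0 ? s32_min : 0;      /* [0,0] stays [0, 0] */
	return b;
}

int main(void)
{
	struct bounds o = assign_old(0, 0), n = assign_new(0, 0);

	printf("old: [%lld, %lld]\n", (long long)o.smin, (long long)o.smax);
	printf("new: [%lld, %lld]\n", (long long)n.smin, (long long)n.smax);
	return 0;
}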
@@ -10428,22 +10428,13 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
 }
 
 #define SECURITY_PREFIX "security_"
 
-static int check_attach_modify_return(struct bpf_verifier_env *env)
+static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
 {
-	struct bpf_prog *prog = env->prog;
-	unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
-
-	/* This is expected to be cleaned up in the future with the KRSI effort
-	 * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h.
-	 */
 	if (within_error_injection_list(addr) ||
 	    !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
 		     sizeof(SECURITY_PREFIX) - 1))
 		return 0;
 
-	verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
-		prog->aux->attach_btf_id, prog->aux->attach_func_name);
-
 	return -EINVAL;
 }
@@ -10654,11 +10645,18 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 				goto out;
 			}
 		}
+
+		if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
+			ret = check_attach_modify_return(prog, addr);
+			if (ret)
+				verbose(env, "%s() is not modifiable\n",
+					prog->aux->attach_func_name);
+		}
+
+		if (ret)
+			goto out;
 		tr->func.addr = (void *)addr;
 		prog->aux->trampoline = tr;
-
-		if (prog->expected_attach_type == BPF_MODIFY_RETURN)
-			ret = check_attach_modify_return(env);
 out:
 		mutex_unlock(&tr->mutex);
 		if (ret)
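Beyond passing prog and addr explicitly instead of reading them through prog->aux->trampoline, the reordering applies a general rule: run every check that can fail before the trampoline pointer is published in prog->aux, so no error or cleanup path can observe a reference to an object that failure handling has already released. A generic sketch of that validate-before-publish pattern (illustrative only; all names are invented):

#include <stddef.h>

struct holder { void *published; };

static int validate(void *candidate)
{
	return candidate ? 0 : -1;	/* stand-in for the real check */
}

static int install(struct holder *h, void *candidate)
{
	int err = validate(candidate);	/* can fail: nothing published yet */

	if (err)
		return err;		/* error path sees no dangling state */

	h->published = candidate;	/* publish only after validation */
	return 0;
}

int main(void)
{
	struct holder h = { NULL };

	return install(&h, &h);		/* demo call; returns 0 on success */
}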
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -341,8 +341,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 {
 	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
 	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+	u64 npgs, addr = mr->addr, size = mr->len;
 	unsigned int chunks, chunks_per_page;
-	u64 addr = mr->addr, size = mr->len;
 	int err;
 
 	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
@@ -372,6 +372,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if ((addr + size) < addr)
 		return -EINVAL;
 
+	npgs = div_u64(size, PAGE_SIZE);
+	if (npgs > U32_MAX)
+		return -EINVAL;
+
 	chunks = (unsigned int)div_u64(size, chunk_size);
 	if (chunks == 0)
 		return -EINVAL;
@@ -391,7 +395,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->size = size;
 	umem->headroom = headroom;
 	umem->chunk_size_nohr = chunk_size - headroom;
-	umem->npgs = size / PAGE_SIZE;
+	umem->npgs = (u32)npgs;
 	umem->pgs = NULL;
 	umem->user = NULL;
 	umem->flags = mr->flags;
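The added npgs check guards a u64-to-u32 narrowing: umem->npgs is a u32, so without it a sufficiently large mr->len would register a umem whose stored page count silently wraps. A stand-alone demonstration of the overflow (user-space C; PAGE_SIZE hard-coded to 4096 for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t size = (UINT64_C(1) << 32) * PAGE_SIZE; /* 16 TiB */
	uint64_t npgs = size / PAGE_SIZE;                /* 2^32 pages */
	uint32_t truncated = (uint32_t)npgs;             /* wraps to 0 */

	printf("npgs=%" PRIu64 " truncated=%u\n", npgs, truncated);

	if (npgs > UINT32_MAX)	/* the added check catches this early */
		return 1;	/* the kernel returns -EINVAL here */
	return 0;
}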
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -238,7 +238,7 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	/* r1 = [0x00, 0xff] */
 	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
@@ -253,10 +253,6 @@
 	 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
 	 */
 	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
-	/* r1 = 0 or
-	 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
-	 */
-	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
 	/* error on OOB pointer computation */
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
 	/* exit */
@@ -265,8 +261,10 @@
 	},
 	.fixup_map_hash_8b = { 3 },
 	/* not actually fully unbounded, but the bound is very high */
-	.errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
-	.result = REJECT
+	.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
+	.result_unpriv = REJECT,
+	.errstr = "value -4294967168 makes map_value pointer be out of bounds",
+	.result = REJECT,
 },
 {
 	"bounds check after truncation of boundary-crossing range (2)",
@@ -276,7 +274,7 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_LD_MAP_FD(BPF_REG_1, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
 	/* r1 = [0x00, 0xff] */
 	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
@@ -293,10 +291,6 @@
 	 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
 	 */
 	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
-	/* r1 = 0 or
-	 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
-	 */
-	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
 	/* error on OOB pointer computation */
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
 	/* exit */
@@ -305,8 +299,10 @@
 	},
 	.fixup_map_hash_8b = { 3 },
 	/* not actually fully unbounded, but the bound is very high */
-	.errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
-	.result = REJECT
+	.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
+	.result_unpriv = REJECT,
+	.errstr = "value -4294967168 makes map_value pointer be out of bounds",
+	.result = REJECT,
 },
 {
 	"bounds check after wrapping 32-bit addition",
@@ -539,3 +535,25 @@
 	},
 	.result = ACCEPT
 },
+{
+	"assigning 32bit bounds to 64bit for wA = 0, wB = wA",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_MOV32_IMM(BPF_REG_9, 0),
+	BPF_MOV32_REG(BPF_REG_2, BPF_REG_9),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2),
+	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1),
+	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.result = ACCEPT,
+	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
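The new test encodes, in raw instructions, the kind of code newer clang emits when it tracks values in 32-bit subregisters. A hypothetical C source that could lower to the same wA = 0, wB = wA pattern might look as follows (the section name, variable names, and the 8-byte bounds check are illustrative; actual codegen depends on the compiler version):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical source (not from the tree): zero is materialized in a
 * 32-bit subregister and copied to another before being added to the
 * packet pointer, mirroring the raw instructions in the test above. */
SEC("tc")
int assign_32_to_64(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	__u32 zero = 0;			/* w9 = 0  */
	__u32 off = zero;		/* w2 = w9 */

	if (data + off + 8 > data_end)	/* r3 = r6 + 8; if r3 > r8 ... */
		return 0;
	return *(__u32 *)(data + off) ? 1 : 0;	/* r5 = *(u32 *)(r6 + 0) */
}

char _license[] SEC("license") = "GPL";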