Commit 6788fac8 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-10-27

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix TOCTOU race in BTF header validation, from Martin and Wenwen.

2) Fix devmap interface comparison in notifier call which was
   neglecting netns, from Taehee.

3) Several fixes in various places, for example, correcting direct
   packet access and helper function availability, from Daniel.

4) Fix BPF kselftest config fragment to include af_xdp and sockmap,
   from Naresh.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 345671ea d8fd9e10
@@ -92,6 +92,14 @@ Values :
 	0 - disable JIT kallsyms export (default value)
 	1 - enable JIT kallsyms export for privileged users only
 
+bpf_jit_limit
+-------------
+
+This enforces a global limit for memory allocations to the BPF JIT
+compiler in order to reject unprivileged JIT requests once it has
+been surpassed. bpf_jit_limit contains the value of the global limit
+in bytes.
+
 dev_weight
 --------------

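As an aside for readers, not part of this commit: the sysctl table entry later in this diff registers the knob under net.core with mode 0600, so root can query it like any other sysctl. A minimal user-space sketch, assuming the conventional /proc/sys path:

#include <stdio.h>

int main(void)
{
	/* Mode 0600 on the sysctl entry means this needs root. */
	FILE *f = fopen("/proc/sys/net/core/bpf_jit_limit", "r");
	long limit;

	if (!f) {
		perror("bpf_jit_limit");
		return 1;
	}
	if (fscanf(f, "%ld", &limit) == 1)
		printf("global BPF JIT limit: %ld bytes\n", limit);
	fclose(f);
	return 0;
}
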
@@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
+extern int bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

@@ -2067,56 +2067,47 @@ static int btf_check_sec_info(struct btf_verifier_env *env,
 	return 0;
 }
 
-static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
-			 u32 btf_data_size)
+static int btf_parse_hdr(struct btf_verifier_env *env)
 {
+	u32 hdr_len, hdr_copy, btf_data_size;
 	const struct btf_header *hdr;
-	u32 hdr_len, hdr_copy;
-	/*
-	 * Minimal part of the "struct btf_header" that
-	 * contains the hdr_len.
-	 */
-	struct btf_min_header {
-		u16	magic;
-		u8	version;
-		u8	flags;
-		u32	hdr_len;
-	} __user *min_hdr;
 	struct btf *btf;
 	int err;
 
 	btf = env->btf;
-	min_hdr = btf_data;
+	btf_data_size = btf->data_size;
 
-	if (btf_data_size < sizeof(*min_hdr)) {
+	if (btf_data_size <
+	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
 		btf_verifier_log(env, "hdr_len not found");
 		return -EINVAL;
 	}
 
-	if (get_user(hdr_len, &min_hdr->hdr_len))
-		return -EFAULT;
+	hdr = btf->data;
+	hdr_len = hdr->hdr_len;
 	if (btf_data_size < hdr_len) {
 		btf_verifier_log(env, "btf_header not found");
 		return -EINVAL;
 	}
 
-	err = bpf_check_uarg_tail_zero(btf_data, sizeof(btf->hdr), hdr_len);
-	if (err) {
-		if (err == -E2BIG)
-			btf_verifier_log(env, "Unsupported btf_header");
-		return err;
+	/* Ensure the unsupported header fields are zero */
+	if (hdr_len > sizeof(btf->hdr)) {
+		u8 *expected_zero = btf->data + sizeof(btf->hdr);
+		u8 *end = btf->data + hdr_len;
+
+		for (; expected_zero < end; expected_zero++) {
+			if (*expected_zero) {
+				btf_verifier_log(env, "Unsupported btf_header");
+				return -E2BIG;
+			}
+		}
 	}
 
 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
-	if (copy_from_user(&btf->hdr, btf_data, hdr_copy))
-		return -EFAULT;
+	memcpy(&btf->hdr, btf->data, hdr_copy);
 
 	hdr = &btf->hdr;
 
-	if (hdr->hdr_len != hdr_len)
-		return -EINVAL;
-
 	btf_verifier_log_hdr(env, btf_data_size);
 
 	if (hdr->magic != BTF_MAGIC) {
@@ -2186,10 +2177,6 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
 	}
 	env->btf = btf;
 
-	err = btf_parse_hdr(env, btf_data, btf_data_size);
-	if (err)
-		goto errout;
-
 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
 	if (!data) {
 		err = -ENOMEM;
@@ -2198,13 +2185,18 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
 
 	btf->data = data;
 	btf->data_size = btf_data_size;
-	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
 
 	if (copy_from_user(data, btf_data, btf_data_size)) {
 		err = -EFAULT;
 		goto errout;
 	}
 
+	err = btf_parse_hdr(env);
+	if (err)
+		goto errout;
+
+	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
+
 	err = btf_parse_str_sec(env);
 	if (err)
 		goto errout;

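The TOCTOU fix above is worth spelling out: previously btf_parse_hdr() validated the header with get_user()/copy_from_user() reads against user memory, and the full blob was copied into the kernel afterwards, so user space could rewrite hdr_len between the check and the final copy. Now btf_parse() copies the whole blob first and btf_parse_hdr() parses only the kernel-resident copy. A minimal sketch of the copy-then-check pattern (illustrative only; names are hypothetical, not kernel API):

#include <stdint.h>
#include <string.h>

struct hdr {
	uint16_t magic;
	uint8_t  version;
	uint8_t  flags;
	uint32_t hdr_len;
};

/* Validate a private copy, never the shared source buffer, so a
 * concurrent writer cannot flip fields between check and use.
 */
static int parse_hdr(const void *shared, size_t size, struct hdr *out)
{
	if (size < sizeof(*out))
		return -1;
	memcpy(out, shared, sizeof(*out));	/* snapshot first ... */
	if (out->magic != 0xeB9F)		/* ... then check the snapshot */
		return -1;
	if (out->hdr_len > size)
		return -1;
	return 0;
}
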
@@ -365,10 +365,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
+# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
+
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
+int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -577,27 +580,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 	return ret;
 }
 
+static atomic_long_t bpf_jit_current;
+
+#if defined(MODULES_VADDR)
+static int __init bpf_jit_charge_init(void)
+{
+	/* Only used as heuristic here to derive limit. */
+	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
+					    PAGE_SIZE), INT_MAX);
+	return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+#endif
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+	if (atomic_long_add_return(pages, &bpf_jit_current) >
+	    (bpf_jit_limit >> PAGE_SHIFT)) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			atomic_long_sub(pages, &bpf_jit_current);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+	atomic_long_sub(pages, &bpf_jit_current);
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	struct bpf_binary_header *hdr;
-	unsigned int size, hole, start;
+	u32 size, hole, start, pages;
 
 	/* Most of BPF filters are really small, but if some of them
 	 * fill a page, allow at least 128 extra bytes to insert a
 	 * random section of illegal instructions.
 	 */
 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+	pages = size / PAGE_SIZE;
+
+	if (bpf_jit_charge_modmem(pages))
+		return NULL;
 	hdr = module_alloc(size);
-	if (hdr == NULL)
+	if (!hdr) {
+		bpf_jit_uncharge_modmem(pages);
 		return NULL;
+	}
 
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
-	hdr->pages = size / PAGE_SIZE;
+	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -610,7 +650,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
+	u32 pages = hdr->pages;
+
 	module_memfree(hdr);
+	bpf_jit_uncharge_modmem(pages);
 }
 
 /* This symbol is only overridden by archs that have different

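The accounting above is a single global atomic: chargers add their page count up front, and back the charge out if it pushed the total over the limit without CAP_SYS_ADMIN; every free path must uncharge. A user-space sketch of the same pattern with C11 atomics (illustrative; names and the limit value are made up):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long jit_current;		/* pages currently charged */
static long jit_limit_pages = 10000;	/* stand-in for bpf_jit_limit >> PAGE_SHIFT */

static bool charge(long pages, bool privileged)
{
	/* fetch_add returns the old value, so old + pages is the new
	 * total, mirroring atomic_long_add_return() above.
	 */
	if (atomic_fetch_add(&jit_current, pages) + pages > jit_limit_pages &&
	    !privileged) {
		atomic_fetch_sub(&jit_current, pages);	/* roll the charge back */
		return false;
	}
	return true;
}

static void uncharge(long pages)
{
	atomic_fetch_sub(&jit_current, pages);
}
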
@@ -512,8 +512,7 @@ static int dev_map_notification(struct notifier_block *notifier,
 			struct bpf_dtab_netdev *dev, *odev;
 
 			dev = READ_ONCE(dtab->netdev_map[i]);
-			if (!dev ||
-			    dev->dev->ifindex != netdev->ifindex)
+			if (!dev || netdev != dev->dev)
 				continue;
 
 			odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
 			if (dev == odev)

@@ -99,7 +99,6 @@ BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
 const struct bpf_func_proto bpf_map_pop_elem_proto = {
 	.func		= bpf_map_pop_elem,
 	.gpl_only	= false,
-	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
@@ -113,7 +112,6 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 const struct bpf_func_proto bpf_map_peek_elem_proto = {
 	.func		= bpf_map_pop_elem,
 	.gpl_only	= false,
-	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,

@@ -122,6 +122,7 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	raw_spin_lock_irqsave(&qs->lock, flags);
 
 	if (queue_stack_map_is_empty(qs)) {
+		memset(value, 0, qs->map.value_size);
 		err = -ENOENT;
 		goto out;
 	}
@@ -151,6 +152,7 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	raw_spin_lock_irqsave(&qs->lock, flags);
 
 	if (queue_stack_map_is_empty(qs)) {
+		memset(value, 0, qs->map.value_size);
 		err = -ENOENT;
 		goto out;
 	}

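The memset matters because these helpers' protos (shown above) use ARG_PTR_TO_UNINIT_MAP_VALUE: the verifier lets programs pass an uninitialized buffer and trusts the helper to fill it on every return path, so returning -ENOENT without writing the buffer would let the program read whatever was there before. A sketch of that contract (illustrative only, hypothetical helper):

#include <errno.h>
#include <string.h>

/* A helper handed an uninitialized out-buffer must write it on every
 * path, including error paths, or the caller can read leftover memory.
 */
static int peek_elem(const void *elem, void *value, unsigned int size)
{
	if (!elem) {
		memset(value, 0, size);	/* never leave 'value' uninitialized */
		return -ENOENT;
	}
	memcpy(value, elem, size);
	return 0;
}
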
@@ -1387,21 +1387,24 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
 			    enum bpf_access_type t)
 {
 	switch (env->prog->type) {
+	/* Program types only with direct read access go here! */
 	case BPF_PROG_TYPE_LWT_IN:
 	case BPF_PROG_TYPE_LWT_OUT:
 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
 	case BPF_PROG_TYPE_SK_REUSEPORT:
-	/* dst_input() and dst_output() can't write for now */
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+	case BPF_PROG_TYPE_CGROUP_SKB:
 		if (t == BPF_WRITE)
 			return false;
 		/* fallthrough */
+
+	/* Program types with direct read + write access go here! */
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
 	case BPF_PROG_TYPE_XDP:
 	case BPF_PROG_TYPE_LWT_XMIT:
 	case BPF_PROG_TYPE_SK_SKB:
 	case BPF_PROG_TYPE_SK_MSG:
-	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 		if (meta)
 			return meta->pkt_access;
 
@@ -5706,7 +5709,11 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	bool is_narrower_load;
 	u32 target_size;
 
-	if (ops->gen_prologue) {
+	if (ops->gen_prologue || env->seen_direct_write) {
+		if (!ops->gen_prologue) {
+			verbose(env, "bpf verifier is misconfigured\n");
+			return -EINVAL;
+		}
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
 					env->prog);
 		if (cnt >= ARRAY_SIZE(insn_buf)) {

@@ -5264,8 +5264,6 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_msg_pull_data_proto;
 	case BPF_FUNC_msg_push_data:
 		return &bpf_msg_push_data_proto;
-	case BPF_FUNC_get_local_storage:
-		return &bpf_get_local_storage_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -5296,8 +5294,6 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_redirect_map_proto;
 	case BPF_FUNC_sk_redirect_hash:
 		return &bpf_sk_redirect_hash_proto;
-	case BPF_FUNC_get_local_storage:
-		return &bpf_get_local_storage_proto;
 #ifdef CONFIG_INET
 	case BPF_FUNC_sk_lookup_tcp:
 		return &bpf_sk_lookup_tcp_proto;
@@ -5496,7 +5492,13 @@ static bool cg_skb_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, flow_keys):
 		return false;
+	case bpf_ctx_range(struct __sk_buff, data):
+	case bpf_ctx_range(struct __sk_buff, data_end):
+		if (!capable(CAP_SYS_ADMIN))
+			return false;
+		break;
 	}
 
 	if (type == BPF_WRITE) {
 		switch (off) {
 		case bpf_ctx_range(struct __sk_buff, mark):
@@ -5638,6 +5640,15 @@ static bool sock_filter_is_valid_access(int off, int size,
 					prog->expected_attach_type);
 }
 
+static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
+			     const struct bpf_prog *prog)
+{
+	/* Neither direct read nor direct write requires any preliminary
+	 * action.
+	 */
+	return 0;
+}
+
 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
 				const struct bpf_prog *prog, int drop_verdict)
 {
@@ -7204,6 +7215,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
 	.get_func_proto		= xdp_func_proto,
 	.is_valid_access	= xdp_is_valid_access,
 	.convert_ctx_access	= xdp_convert_ctx_access,
+	.gen_prologue		= bpf_noop_prologue,
 };
 
 const struct bpf_prog_ops xdp_prog_ops = {
@@ -7302,6 +7314,7 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = {
 	.get_func_proto		= sk_msg_func_proto,
 	.is_valid_access	= sk_msg_is_valid_access,
 	.convert_ctx_access	= sk_msg_convert_ctx_access,
+	.gen_prologue		= bpf_noop_prologue,
 };
 
 const struct bpf_prog_ops sk_msg_prog_ops = {

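With cg_skb_is_valid_access() above admitting data/data_end for CAP_SYS_ADMIN loaders, a BPF_PROG_TYPE_CGROUP_SKB program can now read packet bytes directly instead of going through bpf_skb_load_bytes(). A minimal sketch of such a program in restricted C (illustrative; the SEC macro is inlined here rather than taken from a helper header, and the policy is contrived):

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("cgroup/skb")
int cg_skb_direct_read(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	unsigned char *first = data;

	/* The verifier insists on this bounds check before any direct load. */
	if (first + 1 > (unsigned char *)data_end)
		return 1;	/* allow */

	/* Contrived policy: drop packets whose first byte is zero. */
	return *first == 0 ? 0 : 1;
}

char _license[] SEC("license") = "GPL";

Note the capable() check happens at load time: an unprivileged loader keeps the old read-denied behavior, which is exactly what the new unprivileged test_verifier cases below assert.
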
@@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 	return ret;
 }
 
-# ifdef CONFIG_HAVE_EBPF_JIT
 static int
 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 				    void __user *buffer, size_t *lenp,
@@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
 	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
-# endif
 #endif
 
 static struct ctl_table net_core_table[] = {
@@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = {
 		.extra2		= &one,
 	},
 # endif
+	{
+		.procname	= "bpf_jit_limit",
+		.data		= &bpf_jit_limit,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
+		.extra1		= &one,
+	},
 #endif
 	{
 		.procname	= "netdev_tstamp_prequeue",

@@ -20,3 +20,5 @@ CONFIG_VXLAN=y
 CONFIG_GENEVE=y
 CONFIG_NET_CLS_FLOWER=m
 CONFIG_LWTUNNEL=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_XDP_SOCKETS=y

@@ -4891,6 +4891,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -5146,6 +5148,7 @@ static struct bpf_test tests[] = {
 		.fixup_cgroup_storage = { 1 },
 		.result = REJECT,
 		.errstr = "get_local_storage() doesn't support non-zero flags",
+		.errstr_unpriv = "R2 leaks addr into helper function",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -5261,6 +5264,7 @@ static struct bpf_test tests[] = {
 		.fixup_percpu_cgroup_storage = { 1 },
 		.result = REJECT,
 		.errstr = "get_local_storage() doesn't support non-zero flags",
+		.errstr_unpriv = "R2 leaks addr into helper function",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -14050,6 +14054,13 @@ static void get_unpriv_disabled()
 	fclose(fd);
 }
 
+static bool test_as_unpriv(struct bpf_test *test)
+{
+	return !test->prog_type ||
+	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
+	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
+}
+
 static int do_test(bool unpriv, unsigned int from, unsigned int to)
 {
 	int i, passes = 0, errors = 0, skips = 0;
@@ -14060,10 +14071,10 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
 		/* Program types that are not supported by non-root we
 		 * skip right away.
 		 */
-		if (!test->prog_type && unpriv_disabled) {
+		if (test_as_unpriv(test) && unpriv_disabled) {
 			printf("#%d/u %s SKIP\n", i, test->descr);
 			skips++;
-		} else if (!test->prog_type) {
+		} else if (test_as_unpriv(test)) {
 			if (!unpriv)
 				set_admin(false);
 			printf("#%d/u %s ", i, test->descr);