Commit 2fae5d0e authored by David S. Miller

Merge branch 'bpf-ctx-narrow'

Yonghong Song says:

====================
bpf: permit bpf program narrower loads for ctx fields

Today, if a user tries to access a ctx field through a narrower load, e.g.,
__be16 prot = __sk_buff->protocol, the verifier rejects the program.
This set contains the verifier changes to permit such narrower loads for
certain ctx fields, as well as new test cases in selftests/bpf.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a88e2676 18f3d6be
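
For illustration only (not part of the series), a minimal sketch of the kind of narrower ctx-field load this set enables. The program name, section name, and return codes are placeholder choices for a SCHED_CLS-style program, mirroring the selftest added below:

/* Hypothetical tc classifier: with this series applied, the narrow loads
 * below are accepted by the verifier and internally rewritten into
 * full-width loads of the 4-byte __sk_buff fields followed by a mask.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int narrow_ctx_loads(struct __sk_buff *skb)
{
	/* 2-byte load from the 4-byte protocol field */
	__u16 prot = *(volatile __u16 *)&skb->protocol;
	/* 1-byte load from the start of the 4-byte hash field */
	__u8 hash_b0 = *(volatile __u8 *)&skb->hash;

	if (prot == 0 && hash_b0 == 0xff)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}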
@@ -157,7 +157,7 @@ struct bpf_verifier_ops {
 	 * with 'type' (read or write) is allowed
 	 */
 	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
-				enum bpf_reg_type *reg_type);
+				enum bpf_reg_type *reg_type, int *ctx_field_size);
 	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
 			    const struct bpf_prog *prog);
 	u32 (*convert_ctx_access)(enum bpf_access_type type,
...
@@ -73,6 +73,7 @@ struct bpf_insn_aux_data {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
 	};
+	int ctx_field_size;	/* the ctx field size for load/store insns, maybe 0 */
 };

 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
...
@@ -758,15 +758,26 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 }

 /* check access to 'struct bpf_context' fields */
-static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
+	int ctx_field_size = 0;
+
 	/* for analyzer ctx accesses are already validated and converted */
 	if (env->analyzer_ops)
 		return 0;

 	if (env->prog->aux->ops->is_valid_access &&
-	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
+	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type, &ctx_field_size)) {
+		/* a non zero ctx_field_size indicates:
+		 * . For this field, the prog type specific ctx conversion algorithm
+		 *   only supports whole field access.
+		 * . This ctx access is a candidate for a later verifier transformation
+		 *   to load the whole field and then apply a mask to get the correct result.
+		 */
+		if (ctx_field_size)
+			env->insn_aux_data[insn_idx].ctx_field_size = ctx_field_size;
+
 		/* remember the offset of last byte accessed in ctx */
 		if (env->prog->aux->max_ctx_offset < off + size)
 			env->prog->aux->max_ctx_offset = off + size;
@@ -868,7 +879,7 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
 			    int bpf_size, enum bpf_access_type t,
 			    int value_regno)
 {
@@ -911,7 +922,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 			verbose("R%d leaks addr into ctx\n", value_regno);
 			return -EACCES;
 		}
-		err = check_ctx_access(env, off, size, t, &reg_type);
+		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
 		if (!err && t == BPF_READ && value_regno >= 0) {
 			mark_reg_unknown_value_and_range(state->regs,
 							 value_regno);
@@ -972,7 +983,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 	return err;
 }

-static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs;
 	int err;
@@ -994,13 +1005,13 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return err;

 	/* check whether atomic_add can read the memory */
-	err = check_mem_access(env, insn->dst_reg, insn->off,
+	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_READ, -1);
 	if (err)
 		return err;

 	/* check whether atomic_add can write into the same memory */
-	return check_mem_access(env, insn->dst_reg, insn->off,
+	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_WRITE, -1);
 }
@@ -1416,7 +1427,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	 * is inferred from register state.
 	 */
 	for (i = 0; i < meta.access_size; i++) {
-		err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
 		if (err)
 			return err;
 	}
@@ -2993,18 +3004,12 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (src_reg + off) is readable,
 			 * the state of dst_reg will be updated by this func
 			 */
-			err = check_mem_access(env, insn->src_reg, insn->off,
+			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_READ,
 					       insn->dst_reg);
 			if (err)
 				return err;

-			if (BPF_SIZE(insn->code) != BPF_W &&
-			    BPF_SIZE(insn->code) != BPF_DW) {
-				insn_idx++;
-				continue;
-			}
-
 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;

 			if (*prev_src_type == NOT_INIT) {
@@ -3032,7 +3037,7 @@ static int do_check(struct bpf_verifier_env *env)
 			enum bpf_reg_type *prev_dst_type, dst_reg_type;

 			if (BPF_MODE(insn->code) == BPF_XADD) {
-				err = check_xadd(env, insn);
+				err = check_xadd(env, insn_idx, insn);
 				if (err)
 					return err;
 				insn_idx++;
@@ -3051,7 +3056,7 @@ static int do_check(struct bpf_verifier_env *env)
 			dst_reg_type = regs[insn->dst_reg].type;

 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn->dst_reg, insn->off,
+			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
 					       insn->src_reg);
 			if (err)
@@ -3080,7 +3085,7 @@ static int do_check(struct bpf_verifier_env *env)
 				return err;

 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn->dst_reg, insn->off,
+			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
 					       -1);
 			if (err)
@@ -3383,7 +3388,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	struct bpf_insn insn_buf[16], *insn;
 	struct bpf_prog *new_prog;
 	enum bpf_access_type type;
-	int i, cnt, delta = 0;
+	int i, cnt, off, size, ctx_field_size, is_narrower_load, delta = 0;

 	if (ops->gen_prologue) {
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -3423,11 +3428,39 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
 			continue;

+		off = insn->off;
+		size = bpf_size_to_bytes(BPF_SIZE(insn->code));
+		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
+		is_narrower_load = (type == BPF_READ && size < ctx_field_size);
+
+		/* If the read access is a narrower load of the field,
+		 * convert it to a 4/8-byte load, to minimize program type
+		 * specific convert_ctx_access changes. If the conversion is
+		 * successful, we will apply a proper mask to the result.
+		 */
+		if (is_narrower_load) {
+			int size_code = BPF_H;
+
+			if (ctx_field_size == 4)
+				size_code = BPF_W;
+			else if (ctx_field_size == 8)
+				size_code = BPF_DW;
+			insn->off = off & ~(ctx_field_size - 1);
+			insn->code = BPF_LDX | BPF_MEM | size_code;
+		}
 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
 		}
+		if (is_narrower_load) {
+			if (ctx_field_size <= 4)
+				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
+								(1 << size * 8) - 1);
+			else
+				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+								(1 << size * 8) - 1);
+		}

 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 		if (!new_prog)
...
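
To make the rewrite above concrete, here is a small user-space sketch (plain C, illustrative only, not kernel code) of the bookkeeping convert_ctx_accesses() performs for a narrower read: the offset is aligned down to the field start, the load is widened to the full field, and an AND mask keeps only the requested low bytes.

#include <stdio.h>

/* Mirrors the narrow-load arithmetic for illustration: a 'size'-byte read
 * at 'off' within a field of 'ctx_field_size' bytes becomes a full-field
 * load at the aligned offset plus a mask of the low size*8 bits.
 */
static void narrow_load_rewrite(int off, int size, int ctx_field_size)
{
	int widened_off = off & ~(ctx_field_size - 1);		/* field-aligned offset */
	unsigned long long mask = (1ULL << (size * 8)) - 1;	/* keep 'size' bytes */

	printf("%d-byte read at off %d -> %d-byte load at off %d, then AND 0x%llx\n",
	       size, off, ctx_field_size, widened_off, mask);
}

int main(void)
{
	narrow_load_rewrite(0, 1, 4);	/* e.g. 1-byte read of skb->hash (4-byte field) */
	narrow_load_rewrite(0, 2, 8);	/* e.g. 2-byte read of sample_period (8-byte field) */
	return 0;
}

On little-endian hosts the masked low bytes are exactly the bytes the program asked for; on big-endian hosts the is_valid_access changes below restrict which narrow offsets are permitted so the same widen-and-mask result still matches the requested bytes.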
@@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-					enum bpf_reg_type *reg_type)
+					enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
 	if (off < 0 || off >= sizeof(struct pt_regs))
 		return false;
@@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 }

 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    enum bpf_reg_type *reg_type)
+				    enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
 		return false;
@@ -581,17 +581,26 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
 };

 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    enum bpf_reg_type *reg_type)
+				    enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
+	int sample_period_off;
+
 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
 		return false;
 	if (type != BPF_READ)
 		return false;
 	if (off % size != 0)
 		return false;
-	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
-		if (size != sizeof(u64))
-			return false;
+
+	/* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
+	sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
+	if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
+		*ctx_field_size = 8;
+#ifdef __LITTLE_ENDIAN
+		return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
+#else
+		return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
+#endif
 	} else {
 		if (size != sizeof(long))
 			return false;
...
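
As a worked example of the sample_period rule above, a small sketch (plain C, illustrative only; rel_off is the offset relative to the start of the 8-byte sample_period field, which the kernel code assumes is 8-byte aligned):

#include <stdbool.h>
#include <stdio.h>

/* Restates the check coded above: the read must have a power-of-two size of
 * at most 8 bytes, and must start at the field boundary on little-endian
 * (or end at the field's last byte on big-endian), so that the widened
 * 8-byte load plus mask yields the bytes the program asked for.
 */
static bool sample_period_read_ok(int rel_off, int size, bool little_endian)
{
	if (size > 8 || (size & (size - 1)) != 0)
		return false;
	return little_endian ? (rel_off & 0x7) == 0
			     : ((rel_off & 0x7) + size) == 8;
}

int main(void)
{
	printf("LE, off 0, size 2: %d\n", sample_period_read_ok(0, 2, true));	/* 1 */
	printf("BE, off 6, size 2: %d\n", sample_period_read_ok(6, 2, false));	/* 1 */
	printf("LE, off 1, size 1: %d\n", sample_period_read_ok(1, 1, true));	/* 0 */
	return 0;
}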
@@ -2856,7 +2856,8 @@ lwt_xmit_func_proto(enum bpf_func_id func_id)
 	}
 }

-static bool __is_valid_access(int off, int size)
+static bool __is_valid_access(int off, int size, enum bpf_access_type type,
+			      int *ctx_field_size)
 {
 	if (off < 0 || off >= sizeof(struct __sk_buff))
 		return false;
@@ -2872,9 +2873,27 @@ static bool __is_valid_access(int off, int size)
 		    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
 			return false;
 		break;
+	case offsetof(struct __sk_buff, data) ...
+	     offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+	case offsetof(struct __sk_buff, data_end) ...
+	     offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
+		if (size != sizeof(__u32))
+			return false;
+		break;
 	default:
+		/* permit narrower load for fields other than cb/data/data_end */
+		*ctx_field_size = 4;
+		if (type == BPF_WRITE) {
 			if (size != sizeof(__u32))
 				return false;
+		} else {
+			if (size != sizeof(__u32))
+#ifdef __LITTLE_ENDIAN
+				return (off & 0x3) == 0 && (size == 1 || size == 2);
+#else
+				return (off & 0x3) + size == 4 && (size == 1 || size == 2);
+#endif
+		}
 	}

 	return true;
@@ -2882,12 +2901,16 @@ static bool __is_valid_access(int off, int size)
 static bool sk_filter_is_valid_access(int off, int size,
 				      enum bpf_access_type type,
-				      enum bpf_reg_type *reg_type)
+				      enum bpf_reg_type *reg_type,
+				      int *ctx_field_size)
 {
 	switch (off) {
-	case offsetof(struct __sk_buff, tc_classid):
-	case offsetof(struct __sk_buff, data):
-	case offsetof(struct __sk_buff, data_end):
+	case offsetof(struct __sk_buff, tc_classid) ...
+	     offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
+	case offsetof(struct __sk_buff, data) ...
+	     offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+	case offsetof(struct __sk_buff, data_end) ...
+	     offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
 		return false;
 	}

@@ -2901,15 +2924,17 @@ static bool sk_filter_is_valid_access(int off, int size,
 		}
 	}

-	return __is_valid_access(off, size);
+	return __is_valid_access(off, size, type, ctx_field_size);
 }

 static bool lwt_is_valid_access(int off, int size,
 				enum bpf_access_type type,
-				enum bpf_reg_type *reg_type)
+				enum bpf_reg_type *reg_type,
+				int *ctx_field_size)
 {
 	switch (off) {
-	case offsetof(struct __sk_buff, tc_classid):
+	case offsetof(struct __sk_buff, tc_classid) ...
+	     offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
 		return false;
 	}

@@ -2934,12 +2959,13 @@ static bool lwt_is_valid_access(int off, int size,
 		break;
 	}

-	return __is_valid_access(off, size);
+	return __is_valid_access(off, size, type, ctx_field_size);
 }

 static bool sock_filter_is_valid_access(int off, int size,
 					enum bpf_access_type type,
-					enum bpf_reg_type *reg_type)
+					enum bpf_reg_type *reg_type,
+					int *ctx_field_size)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -3002,7 +3028,8 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
 static bool tc_cls_act_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
-				       enum bpf_reg_type *reg_type)
+				       enum bpf_reg_type *reg_type,
+				       int *ctx_field_size)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -3027,7 +3054,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 		break;
 	}

-	return __is_valid_access(off, size);
+	return __is_valid_access(off, size, type, ctx_field_size);
 }

 static bool __is_valid_xdp_access(int off, int size)
@@ -3044,7 +3071,8 @@ static bool __is_valid_xdp_access(int off, int size)
 static bool xdp_is_valid_access(int off, int size,
 				enum bpf_access_type type,
-				enum bpf_reg_type *reg_type)
+				enum bpf_reg_type *reg_type,
+				int *ctx_field_size)
 {
 	if (type == BPF_WRITE)
 		return false;
...
@@ -14,7 +14,8 @@ LDLIBS += -lcap -lelf
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
 	test_align
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o
+TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
+	test_pkt_md_access.o

 TEST_PROGS := test_kmod.sh
...
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"
int _version SEC("version") = 1;
#define TEST_FIELD(TYPE, FIELD, MASK)					\
	{								\
		TYPE tmp = *(volatile TYPE *)&skb->FIELD;		\
		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
			return TC_ACT_SHOT;				\
	}

SEC("test1")
int process(struct __sk_buff *skb)
{
	TEST_FIELD(__u8,  len, 0xFF);
	TEST_FIELD(__u16, len, 0xFFFF);
	TEST_FIELD(__u32, len, 0xFFFFFFFF);
	TEST_FIELD(__u16, protocol, 0xFFFF);
	TEST_FIELD(__u32, protocol, 0xFFFFFFFF);
	TEST_FIELD(__u8,  hash, 0xFF);
	TEST_FIELD(__u16, hash, 0xFFFF);
	TEST_FIELD(__u32, hash, 0xFFFFFFFF);

	return TC_ACT_OK;
}
@@ -484,6 +484,26 @@ static void test_bpf_obj_id(void)
 		bpf_object__close(objs[i]);
 }

+static void test_pkt_md_access(void)
+{
+	const char *file = "./test_pkt_md_access.o";
+	struct bpf_object *obj;
+	__u32 duration, retval;
+	int err, prog_fd;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+	if (err)
+		return;
+
+	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+				NULL, NULL, &retval, &duration);
+	CHECK(err || retval, "",
+	      "err %d errno %d retval %d duration %d\n",
+	      err, errno, retval, duration);
+
+	bpf_object__close(obj);
+}
+
 int main(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -495,6 +515,7 @@ int main(void)
 	test_l4lb();
 	test_tcp_estats();
 	test_bpf_obj_id();
+	test_pkt_md_access();

 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
...
@@ -1073,44 +1073,75 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
-		"check cb access: byte, oob 1",
+		"__sk_buff->hash, offset 0, byte store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: byte, oob 2",
+		"__sk_buff->tc_index, offset 3, byte store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[0]) - 1),
+				    offsetof(struct __sk_buff, tc_index) + 3),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: byte, oob 3",
+		"check skb->hash byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 3),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"check skb->hash byte load not permitted 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 1),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: byte, oob 4",
+		"check skb->hash byte load not permitted 2",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[0]) - 1),
+				    offsetof(struct __sk_buff, hash) + 2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check skb->hash byte load not permitted 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 3),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash)),
+#endif
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
@@ -1188,44 +1219,53 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 	},
 	{
-		"check cb access: half, oob 1",
+		"check __sk_buff->hash, offset 0, half store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: half, oob 2",
+		"check __sk_buff->tc_index, offset 2, half store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[0]) - 2),
+				    offsetof(struct __sk_buff, tc_index) + 2),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: half, oob 3",
+		"check skb->hash half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 2),
+#endif
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
+		.result = ACCEPT,
 	},
 	{
-		"check cb access: half, oob 4",
+		"check skb->hash half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 2),
+#else
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[0]) - 2),
+				    offsetof(struct __sk_buff, hash)),
+#endif
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
@@ -1366,28 +1406,6 @@ static struct bpf_test tests[] = {
 	},
 	{
 		"check cb access: double, oob 2",
-		.insns = {
-			BPF_MOV64_IMM(BPF_REG_0, 0),
-			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[4]) + 8),
-			BPF_EXIT_INSN(),
-		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
-	},
-	{
-		"check cb access: double, oob 3",
-		.insns = {
-			BPF_MOV64_IMM(BPF_REG_0, 0),
-			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[0]) - 8),
-			BPF_EXIT_INSN(),
-		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
-	},
-	{
-		"check cb access: double, oob 4",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
@@ -1398,22 +1416,22 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 	},
 	{
-		"check cb access: double, oob 5",
+		"check __sk_buff->ifindex dw store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[4]) + 8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, ifindex)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: double, oob 6",
+		"check __sk_buff->ifindex dw load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[0]) - 8),
+				    offsetof(struct __sk_buff, ifindex)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
@@ -5169,6 +5187,98 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 	},
+	{
+		"check bpf_perf_event_data->sample_period byte load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check bpf_perf_event_data->sample_period half load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check bpf_perf_event_data->sample_period word load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check bpf_perf_event_data->sample_period dword load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check skb->data half load not permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data) + 2),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid bpf_context access",
+	},
+	{
+		"check skb->tc_classid half load not permitted for lwt prog",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_classid)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid bpf_context access",
+		.prog_type = BPF_PROG_TYPE_LWT_IN,
+	},
 };

 static int probe_filter_length(const struct bpf_insn *fp)
...