Commit c25b2ae1 authored by Hao Luo, committed by Alexei Starovoitov

bpf: Replace PTR_TO_XXX_OR_NULL with PTR_TO_XXX | PTR_MAYBE_NULL

We have introduced a new type to make bpf_reg composable, by
allocating bits in the type to represent flags.

One of these flags is PTR_MAYBE_NULL, which indicates that a pointer
may be NULL. This patch switches the qualified reg_types to use this
flag (a short sketch of the composition follows the list below). The
reg_types changed in this patch include:

1. PTR_TO_MAP_VALUE_OR_NULL
2. PTR_TO_SOCKET_OR_NULL
3. PTR_TO_SOCK_COMMON_OR_NULL
4. PTR_TO_TCP_SOCK_OR_NULL
5. PTR_TO_BTF_ID_OR_NULL
6. PTR_TO_MEM_OR_NULL
7. PTR_TO_RDONLY_BUF_OR_NULL
8. PTR_TO_RDWR_BUF_OR_NULL
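
For context, the composition works roughly as sketched below. This is a
minimal illustration, not the kernel's exact definitions: the helper names
base_type()/type_flag()/type_may_be_null() and the PTR_MAYBE_NULL flag come
from this series, but the bit width and the exact macro bodies shown here
are assumptions.

    /* Assumed layout: low bits carry the base bpf_reg_type, higher bits
     * carry modifier flags such as PTR_MAYBE_NULL.
     */
    #define BPF_BASE_TYPE_BITS   8
    #define BPF_BASE_TYPE_MASK   ((1U << BPF_BASE_TYPE_BITS) - 1)
    #define PTR_MAYBE_NULL       (1U << BPF_BASE_TYPE_BITS)

    #define base_type(t)         ((t) & BPF_BASE_TYPE_MASK)   /* e.g. PTR_TO_SOCKET */
    #define type_flag(t)         ((t) & ~BPF_BASE_TYPE_MASK)  /* e.g. PTR_MAYBE_NULL */
    #define type_may_be_null(t)  ((t) & PTR_MAYBE_NULL)

    /* With this encoding, PTR_TO_SOCKET_OR_NULL is simply
     * PTR_TO_SOCKET | PTR_MAYBE_NULL, and a successful NULL check only has
     * to clear the PTR_MAYBE_NULL bit instead of mapping between two
     * distinct enum values.
     */
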
Signed-off-by: Hao Luo <haoluo@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20211217003152.48334-5-haoluo@google.com
parent 3c480732
@@ -465,18 +465,15 @@ enum bpf_reg_type {
 	PTR_TO_CTX,		 /* reg points to bpf_context */
 	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
 	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
-	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
 	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
 	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
 	PTR_TO_PACKET,		 /* reg points to skb->data */
 	PTR_TO_PACKET_END,	 /* skb->data + headlen */
 	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
 	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
-	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
 	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
-	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
 	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
-	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
 	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
 	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
 	/* PTR_TO_BTF_ID points to a kernel struct that does not need
@@ -494,18 +491,21 @@ enum bpf_reg_type {
 	 * been checked for null. Used primarily to inform the verifier
 	 * an explicit null check is required for this struct.
 	 */
-	PTR_TO_BTF_ID_OR_NULL,
 	PTR_TO_MEM,		 /* reg points to valid memory region */
-	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
 	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
-	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
 	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
-	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
 	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
 	PTR_TO_FUNC,		 /* reg points to a bpf program function */
-	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
 	__BPF_REG_TYPE_MAX,
 
+	/* Extended reg_types. */
+	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
+	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
+	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
+	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,
+	PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_MEM,
+
 	/* This must be the last entry. Its purpose is to ensure the enum is
 	 * wide enough to hold the higher bits reserved for bpf_type_flag.
 	 */
......
@@ -18,6 +18,8 @@
  * that converting umax_value to int cannot overflow.
  */
 #define BPF_MAX_VAR_SIZ	(1 << 29)
+/* size of type_str_buf in bpf_verifier. */
+#define TYPE_STR_BUF_LEN	64
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
@@ -484,6 +486,8 @@ struct bpf_verifier_env {
 	/* Same as scratched_regs but for stack slots */
 	u64 scratched_stack_slots;
 	u32 prev_log_len, prev_insn_print_len;
+	/* buffer used in reg_type_str() to generate reg_type string */
+	char type_str_buf[TYPE_STR_BUF_LEN];
 };
 
 __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
......
@@ -4940,10 +4940,13 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
+		u32 type, flag;
 
+		type = base_type(ctx_arg_info->reg_type);
+		flag = type_flag(ctx_arg_info->reg_type);
 		if (ctx_arg_info->offset == off &&
-		    (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
-		     ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
+		    (type == PTR_TO_RDWR_BUF || type == PTR_TO_RDONLY_BUF) &&
+		    (flag & PTR_MAYBE_NULL)) {
 			info->reg_type = ctx_arg_info->reg_type;
 			return true;
 		}
......
@@ -174,9 +174,9 @@ static const struct bpf_iter_reg bpf_map_elem_reg_info = {
 	.ctx_arg_info_size	= 2,
 	.ctx_arg_info		= {
 		{ offsetof(struct bpf_iter__bpf_map_elem, key),
-		  PTR_TO_RDONLY_BUF_OR_NULL },
+		  PTR_TO_RDONLY_BUF | PTR_MAYBE_NULL },
 		{ offsetof(struct bpf_iter__bpf_map_elem, value),
-		  PTR_TO_RDWR_BUF_OR_NULL },
+		  PTR_TO_RDWR_BUF | PTR_MAYBE_NULL },
 	},
 };
......
@@ -442,18 +442,6 @@ static bool reg_type_not_null(enum bpf_reg_type type)
 		type == PTR_TO_SOCK_COMMON;
 }
 
-static bool reg_type_may_be_null(enum bpf_reg_type type)
-{
-	return type == PTR_TO_MAP_VALUE_OR_NULL ||
-	       type == PTR_TO_SOCKET_OR_NULL ||
-	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
-	       type == PTR_TO_TCP_SOCK_OR_NULL ||
-	       type == PTR_TO_BTF_ID_OR_NULL ||
-	       type == PTR_TO_MEM_OR_NULL ||
-	       type == PTR_TO_RDONLY_BUF_OR_NULL ||
-	       type == PTR_TO_RDWR_BUF_OR_NULL;
-}
-
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
 	return reg->type == PTR_TO_MAP_VALUE &&
@@ -462,12 +450,9 @@ static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 
 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
 {
-	return type == PTR_TO_SOCKET ||
-		type == PTR_TO_SOCKET_OR_NULL ||
-		type == PTR_TO_TCP_SOCK ||
-		type == PTR_TO_TCP_SOCK_OR_NULL ||
-		type == PTR_TO_MEM ||
-		type == PTR_TO_MEM_OR_NULL;
+	return base_type(type) == PTR_TO_SOCKET ||
+		base_type(type) == PTR_TO_TCP_SOCK ||
+		base_type(type) == PTR_TO_MEM;
 }
 
 static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
@@ -537,39 +522,52 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 	       insn->imm == BPF_CMPXCHG;
 }
 
-/* string representation of 'enum bpf_reg_type' */
-static const char * const reg_type_str[] = {
-	[NOT_INIT]		= "?",
-	[SCALAR_VALUE]		= "inv",
-	[PTR_TO_CTX]		= "ctx",
-	[CONST_PTR_TO_MAP]	= "map_ptr",
-	[PTR_TO_MAP_VALUE]	= "map_value",
-	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
-	[PTR_TO_STACK]		= "fp",
-	[PTR_TO_PACKET]		= "pkt",
-	[PTR_TO_PACKET_META]	= "pkt_meta",
-	[PTR_TO_PACKET_END]	= "pkt_end",
-	[PTR_TO_FLOW_KEYS]	= "flow_keys",
-	[PTR_TO_SOCKET]		= "sock",
-	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
-	[PTR_TO_SOCK_COMMON]	= "sock_common",
-	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
-	[PTR_TO_TCP_SOCK]	= "tcp_sock",
-	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
-	[PTR_TO_TP_BUFFER]	= "tp_buffer",
-	[PTR_TO_XDP_SOCK]	= "xdp_sock",
-	[PTR_TO_BTF_ID]		= "ptr_",
-	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
-	[PTR_TO_PERCPU_BTF_ID]	= "percpu_ptr_",
-	[PTR_TO_MEM]		= "mem",
-	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
-	[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
-	[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
-	[PTR_TO_RDWR_BUF]	= "rdwr_buf",
-	[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
-	[PTR_TO_FUNC]		= "func",
-	[PTR_TO_MAP_KEY]	= "map_key",
-};
+/* string representation of 'enum bpf_reg_type'
+ *
+ * Note that reg_type_str() can not appear more than once in a single verbose()
+ * statement.
+ */
+static const char *reg_type_str(struct bpf_verifier_env *env,
+				enum bpf_reg_type type)
+{
+	char postfix[16] = {0};
+	static const char * const str[] = {
+		[NOT_INIT]		= "?",
+		[SCALAR_VALUE]		= "inv",
+		[PTR_TO_CTX]		= "ctx",
+		[CONST_PTR_TO_MAP]	= "map_ptr",
+		[PTR_TO_MAP_VALUE]	= "map_value",
+		[PTR_TO_STACK]		= "fp",
+		[PTR_TO_PACKET]		= "pkt",
+		[PTR_TO_PACKET_META]	= "pkt_meta",
+		[PTR_TO_PACKET_END]	= "pkt_end",
+		[PTR_TO_FLOW_KEYS]	= "flow_keys",
+		[PTR_TO_SOCKET]		= "sock",
+		[PTR_TO_SOCK_COMMON]	= "sock_common",
+		[PTR_TO_TCP_SOCK]	= "tcp_sock",
+		[PTR_TO_TP_BUFFER]	= "tp_buffer",
+		[PTR_TO_XDP_SOCK]	= "xdp_sock",
+		[PTR_TO_BTF_ID]		= "ptr_",
+		[PTR_TO_PERCPU_BTF_ID]	= "percpu_ptr_",
+		[PTR_TO_MEM]		= "mem",
+		[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
+		[PTR_TO_RDWR_BUF]	= "rdwr_buf",
+		[PTR_TO_FUNC]		= "func",
+		[PTR_TO_MAP_KEY]	= "map_key",
+	};
+
+	if (type & PTR_MAYBE_NULL) {
+		if (base_type(type) == PTR_TO_BTF_ID ||
+		    base_type(type) == PTR_TO_PERCPU_BTF_ID)
+			strncpy(postfix, "or_null_", 16);
+		else
+			strncpy(postfix, "_or_null", 16);
+	}
+
+	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s",
+		 str[base_type(type)], postfix);
+	return env->type_str_buf;
+}
 
 static char slot_type_char[] = {
 	[STACK_INVALID]	= '?',
@@ -675,7 +673,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 			continue;
 		verbose(env, " R%d", i);
 		print_liveness(env, reg->live);
-		verbose(env, "=%s", reg_type_str[t]);
+		verbose(env, "=%s", reg_type_str(env, t));
 		if (t == SCALAR_VALUE && reg->precise)
 			verbose(env, "P");
 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
@@ -683,9 +681,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 			/* reg->off should be 0 for SCALAR_VALUE */
 			verbose(env, "%lld", reg->var_off.value + reg->off);
 		} else {
-			if (t == PTR_TO_BTF_ID ||
-			    t == PTR_TO_BTF_ID_OR_NULL ||
-			    t == PTR_TO_PERCPU_BTF_ID)
+			if (base_type(t) == PTR_TO_BTF_ID ||
+			    base_type(t) == PTR_TO_PERCPU_BTF_ID)
 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
 			verbose(env, "(id=%d", reg->id);
 			if (reg_type_may_be_refcounted_or_null(t))
@@ -694,10 +691,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 				verbose(env, ",off=%d", reg->off);
 			if (type_is_pkt_pointer(t))
 				verbose(env, ",r=%d", reg->range);
-			else if (t == CONST_PTR_TO_MAP ||
-				 t == PTR_TO_MAP_KEY ||
-				 t == PTR_TO_MAP_VALUE ||
-				 t == PTR_TO_MAP_VALUE_OR_NULL)
+			else if (base_type(t) == CONST_PTR_TO_MAP ||
+				 base_type(t) == PTR_TO_MAP_KEY ||
+				 base_type(t) == PTR_TO_MAP_VALUE)
 				verbose(env, ",ks=%d,vs=%d",
 					reg->map_ptr->key_size,
 					reg->map_ptr->value_size);
@@ -769,7 +765,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 		if (is_spilled_reg(&state->stack[i])) {
 			reg = &state->stack[i].spilled_ptr;
 			t = reg->type;
-			verbose(env, "=%s", reg_type_str[t]);
+			verbose(env, "=%s", reg_type_str(env, t));
 			if (t == SCALAR_VALUE && reg->precise)
 				verbose(env, "P");
 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
@@ -1202,8 +1198,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
 
 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
 {
-	switch (reg->type) {
-	case PTR_TO_MAP_VALUE_OR_NULL: {
+	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
 		const struct bpf_map *map = reg->map_ptr;
 
 		if (map->inner_map_meta) {
@@ -1222,32 +1217,10 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
 		} else {
 			reg->type = PTR_TO_MAP_VALUE;
 		}
-		break;
-	}
-	case PTR_TO_SOCKET_OR_NULL:
-		reg->type = PTR_TO_SOCKET;
-		break;
-	case PTR_TO_SOCK_COMMON_OR_NULL:
-		reg->type = PTR_TO_SOCK_COMMON;
-		break;
-	case PTR_TO_TCP_SOCK_OR_NULL:
-		reg->type = PTR_TO_TCP_SOCK;
-		break;
-	case PTR_TO_BTF_ID_OR_NULL:
-		reg->type = PTR_TO_BTF_ID;
-		break;
-	case PTR_TO_MEM_OR_NULL:
-		reg->type = PTR_TO_MEM;
-		break;
-	case PTR_TO_RDONLY_BUF_OR_NULL:
-		reg->type = PTR_TO_RDONLY_BUF;
-		break;
-	case PTR_TO_RDWR_BUF_OR_NULL:
-		reg->type = PTR_TO_RDWR_BUF;
-		break;
-	default:
-		WARN_ONCE(1, "unknown nullable register type");
-	}
+		return;
+	}
+
+	reg->type &= ~PTR_MAYBE_NULL;
 }
 
 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
@@ -2103,7 +2076,7 @@ static int mark_reg_read(struct bpf_verifier_env *env,
 			break;
 		if (parent->live & REG_LIVE_DONE) {
 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
-				reg_type_str[parent->type],
+				reg_type_str(env, parent->type),
 				parent->var_off.value, parent->off);
 			return -EFAULT;
 		}
@@ -2768,9 +2741,8 @@ static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
 
 static bool is_spillable_regtype(enum bpf_reg_type type)
 {
-	switch (type) {
+	switch (base_type(type)) {
 	case PTR_TO_MAP_VALUE:
-	case PTR_TO_MAP_VALUE_OR_NULL:
 	case PTR_TO_STACK:
 	case PTR_TO_CTX:
 	case PTR_TO_PACKET:
@@ -2779,21 +2751,14 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	case PTR_TO_FLOW_KEYS:
 	case CONST_PTR_TO_MAP:
 	case PTR_TO_SOCKET:
-	case PTR_TO_SOCKET_OR_NULL:
 	case PTR_TO_SOCK_COMMON:
-	case PTR_TO_SOCK_COMMON_OR_NULL:
 	case PTR_TO_TCP_SOCK:
-	case PTR_TO_TCP_SOCK_OR_NULL:
 	case PTR_TO_XDP_SOCK:
 	case PTR_TO_BTF_ID:
-	case PTR_TO_BTF_ID_OR_NULL:
 	case PTR_TO_RDONLY_BUF:
-	case PTR_TO_RDONLY_BUF_OR_NULL:
 	case PTR_TO_RDWR_BUF:
-	case PTR_TO_RDWR_BUF_OR_NULL:
 	case PTR_TO_PERCPU_BTF_ID:
 	case PTR_TO_MEM:
-	case PTR_TO_MEM_OR_NULL:
 	case PTR_TO_FUNC:
 	case PTR_TO_MAP_KEY:
 		return true;
@@ -3633,7 +3598,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 	 */
 	*reg_type = info.reg_type;
 
-	if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
+	if (base_type(*reg_type) == PTR_TO_BTF_ID) {
 		*btf = info.btf;
 		*btf_id = info.btf_id;
 	} else {
@@ -3701,7 +3666,7 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
 	}
 
 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
-		regno, reg_type_str[reg->type], off, size);
+		regno, reg_type_str(env, reg->type), off, size);
 
 	return -EACCES;
 }
@@ -4466,7 +4431,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		} else {
 			mark_reg_known_zero(env, regs,
 					    value_regno);
-			if (reg_type_may_be_null(reg_type))
+			if (type_may_be_null(reg_type))
 				regs[value_regno].id = ++env->id_gen;
 			/* A load of ctx field could have different
 			 * actual load size with the one encoded in the
@@ -4474,8 +4439,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			 * a sub-register.
 			 */
 			regs[value_regno].subreg_def = DEF_NOT_SUBREG;
-			if (reg_type == PTR_TO_BTF_ID ||
-			    reg_type == PTR_TO_BTF_ID_OR_NULL) {
+			if (base_type(reg_type) == PTR_TO_BTF_ID) {
 				regs[value_regno].btf = btf;
 				regs[value_regno].btf_id = btf_id;
 			}
@@ -4528,7 +4492,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	} else if (type_is_sk_pointer(reg->type)) {
 		if (t == BPF_WRITE) {
 			verbose(env, "R%d cannot write into %s\n",
-				regno, reg_type_str[reg->type]);
+				regno, reg_type_str(env, reg->type));
 			return -EACCES;
 		}
 		err = check_sock_access(env, insn_idx, regno, off, size, t);
@@ -4547,7 +4511,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	} else if (reg->type == PTR_TO_RDONLY_BUF) {
 		if (t == BPF_WRITE) {
 			verbose(env, "R%d cannot write into %s\n",
-				regno, reg_type_str[reg->type]);
+				regno, reg_type_str(env, reg->type));
 			return -EACCES;
 		}
 		err = check_buffer_access(env, reg, regno, off, size, false,
@@ -4563,7 +4527,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		mark_reg_unknown(env, regs, value_regno);
 	} else {
 		verbose(env, "R%d invalid mem access '%s'\n", regno,
-			reg_type_str[reg->type]);
+			reg_type_str(env, reg->type));
 		return -EACCES;
 	}
@@ -4630,7 +4594,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 	    is_sk_reg(env, insn->dst_reg)) {
 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
 			insn->dst_reg,
-			reg_type_str[reg_state(env, insn->dst_reg)->type]);
+			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
 		return -EACCES;
 	}
@@ -4850,9 +4814,9 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 		    register_is_null(reg))
 			return 0;
 
-		verbose(env, "R%d type=%s expected=%s\n", regno,
-			reg_type_str[reg->type],
-			reg_type_str[PTR_TO_STACK]);
+		verbose(env, "R%d type=%s ", regno,
+			reg_type_str(env, reg->type));
+		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
 		return -EACCES;
 	}
 }
@@ -4863,7 +4827,7 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
 	if (register_is_null(reg))
 		return 0;
 
-	if (reg_type_may_be_null(reg->type)) {
+	if (type_may_be_null(reg->type)) {
 		/* Assuming that the register contains a value check if the memory
 		 * access is safe. Temporarily save and restore the register's state as
 		 * the conversion shouldn't be visible to a caller.
@@ -5197,10 +5161,10 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
 			goto found;
 	}
 
-	verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
+	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, type));
 	for (j = 0; j + 1 < i; j++)
-		verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
-	verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
+		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
+	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
 	return -EACCES;
 
 found:
@@ -6474,6 +6438,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 {
 	const struct bpf_func_proto *fn = NULL;
 	enum bpf_return_type ret_type;
+	enum bpf_type_flag ret_flag;
 	struct bpf_reg_state *regs;
 	struct bpf_call_arg_meta meta;
 	int insn_idx = *insn_idx_p;
@@ -6614,6 +6579,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	/* update return register (already marked as written above) */
 	ret_type = fn->ret_type;
+	ret_flag = type_flag(fn->ret_type);
 	if (ret_type == RET_INTEGER) {
 		/* sets type to SCALAR_VALUE */
 		mark_reg_unknown(env, regs, BPF_REG_0);
@@ -6633,25 +6599,23 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		}
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 		regs[BPF_REG_0].map_uid = meta.map_uid;
-		if (type_may_be_null(ret_type)) {
-			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
-		} else {
-			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
-			if (map_value_has_spin_lock(meta.map_ptr))
-				regs[BPF_REG_0].id = ++env->id_gen;
+		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
+		if (!type_may_be_null(ret_type) &&
+		    map_value_has_spin_lock(meta.map_ptr)) {
+			regs[BPF_REG_0].id = ++env->id_gen;
 		}
 	} else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
+		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
 	} else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
+		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
 	} else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
+		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
 	} else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
+		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
 		regs[BPF_REG_0].mem_size = meta.mem_size;
 	} else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
 		const struct btf_type *t;
@@ -6671,14 +6635,10 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				tname, PTR_ERR(ret));
 			return -EINVAL;
 		}
-		regs[BPF_REG_0].type =
-			(ret_type & PTR_MAYBE_NULL) ?
-			PTR_TO_MEM_OR_NULL : PTR_TO_MEM;
+		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
 		regs[BPF_REG_0].mem_size = tsize;
 	} else {
-		regs[BPF_REG_0].type =
-			(ret_type & PTR_MAYBE_NULL) ?
-			PTR_TO_BTF_ID_OR_NULL : PTR_TO_BTF_ID;
+		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
 		regs[BPF_REG_0].btf = meta.ret_btf;
 		regs[BPF_REG_0].btf_id = meta.ret_btf_id;
 	}
@@ -6686,9 +6646,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		int ret_btf_id;
 
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].type = (ret_type & PTR_MAYBE_NULL) ?
-						     PTR_TO_BTF_ID_OR_NULL :
-						     PTR_TO_BTF_ID;
+		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
 		ret_btf_id = *fn->ret_btf_id;
 		if (ret_btf_id == 0) {
 			verbose(env, "invalid return type %u of func %s#%d\n",
@@ -6707,7 +6665,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		return -EINVAL;
 	}
 
-	if (reg_type_may_be_null(regs[BPF_REG_0].type))
+	if (type_may_be_null(regs[BPF_REG_0].type))
 		regs[BPF_REG_0].id = ++env->id_gen;
 
 	if (is_ptr_cast_function(func_id)) {
@@ -6916,25 +6874,25 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
 		verbose(env, "math between %s pointer and %lld is not allowed\n",
-			reg_type_str[type], val);
+			reg_type_str(env, type), val);
 		return false;
 	}
 
 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
 		verbose(env, "%s pointer offset %d is not allowed\n",
-			reg_type_str[type], reg->off);
+			reg_type_str(env, type), reg->off);
 		return false;
 	}
 
 	if (smin == S64_MIN) {
 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
-			reg_type_str[type]);
+			reg_type_str(env, type));
 		return false;
 	}
 
 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
-			smin, reg_type_str[type]);
+			smin, reg_type_str(env, type));
 		return false;
 	}
@@ -7311,11 +7269,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
-	switch (ptr_reg->type) {
-	case PTR_TO_MAP_VALUE_OR_NULL:
+	if (ptr_reg->type & PTR_MAYBE_NULL) {
 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
-			dst, reg_type_str[ptr_reg->type]);
+			dst, reg_type_str(env, ptr_reg->type));
 		return -EACCES;
+	}
+
+	switch (base_type(ptr_reg->type)) {
 	case CONST_PTR_TO_MAP:
 		/* smin_val represents the known value */
 		if (known && smin_val == 0 && opcode == BPF_ADD)
@@ -7323,14 +7283,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		fallthrough;
 	case PTR_TO_PACKET_END:
 	case PTR_TO_SOCKET:
-	case PTR_TO_SOCKET_OR_NULL:
 	case PTR_TO_SOCK_COMMON:
-	case PTR_TO_SOCK_COMMON_OR_NULL:
 	case PTR_TO_TCP_SOCK:
-	case PTR_TO_TCP_SOCK_OR_NULL:
 	case PTR_TO_XDP_SOCK:
 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
-			dst, reg_type_str[ptr_reg->type]);
+			dst, reg_type_str(env, ptr_reg->type));
 		return -EACCES;
 	default:
 		break;
@@ -9049,7 +9006,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 			    struct bpf_reg_state *reg, u32 id,
 			    bool is_null)
 {
-	if (reg_type_may_be_null(reg->type) && reg->id == id &&
+	if (type_may_be_null(reg->type) && reg->id == id &&
 	    !WARN_ON_ONCE(!reg->id)) {
 		/* Old offset (both fixed and variable parts) should
 		 * have been known-zero, because we don't allow pointer
@@ -9427,7 +9384,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	 */
 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-	    reg_type_may_be_null(dst_reg->type)) {
+	    type_may_be_null(dst_reg->type)) {
 		/* Mark all identical registers in each branch as either
 		 * safe or unknown depending R == 0 or R != 0 conditional.
 		 */
@@ -9681,7 +9638,7 @@ static int check_return_code(struct bpf_verifier_env *env)
 		/* enforce return zero from async callbacks like timer */
 		if (reg->type != SCALAR_VALUE) {
 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
-				reg_type_str[reg->type]);
+				reg_type_str(env, reg->type));
 			return -EINVAL;
 		}
@@ -9695,7 +9652,7 @@ static int check_return_code(struct bpf_verifier_env *env)
 	if (is_subprog) {
 		if (reg->type != SCALAR_VALUE) {
 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
-				reg_type_str[reg->type]);
+				reg_type_str(env, reg->type));
 			return -EINVAL;
 		}
 		return 0;
@@ -9759,7 +9716,7 @@ static int check_return_code(struct bpf_verifier_env *env)
 	if (reg->type != SCALAR_VALUE) {
 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
-			reg_type_str[reg->type]);
+			reg_type_str(env, reg->type));
 		return -EINVAL;
 	}
@@ -10616,7 +10573,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 		return true;
 	if (rcur->type == NOT_INIT)
 		return false;
-	switch (rold->type) {
+	switch (base_type(rold->type)) {
 	case SCALAR_VALUE:
 		if (env->explore_alu_limits)
 			return false;
@@ -10638,6 +10595,22 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 		}
 	case PTR_TO_MAP_KEY:
 	case PTR_TO_MAP_VALUE:
+		/* a PTR_TO_MAP_VALUE could be safe to use as a
+		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
+		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
+		 * checked, doing so could have affected others with the same
+		 * id, and we can't check for that because we lost the id when
+		 * we converted to a PTR_TO_MAP_VALUE.
+		 */
+		if (type_may_be_null(rold->type)) {
+			if (!type_may_be_null(rcur->type))
+				return false;
+			if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
+				return false;
+			/* Check our ids match any regs they're supposed to */
+			return check_ids(rold->id, rcur->id, idmap);
+		}
+
 		/* If the new min/max/var_off satisfy the old ones and
 		 * everything else matches, we are OK.
 		 * 'id' is not compared, since it's only used for maps with
@@ -10649,20 +10622,6 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
 		       range_within(rold, rcur) &&
 		       tnum_in(rold->var_off, rcur->var_off);
-	case PTR_TO_MAP_VALUE_OR_NULL:
-		/* a PTR_TO_MAP_VALUE could be safe to use as a
-		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
-		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
-		 * checked, doing so could have affected others with the same
-		 * id, and we can't check for that because we lost the id when
-		 * we converted to a PTR_TO_MAP_VALUE.
-		 */
-		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
-			return false;
-		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
-			return false;
-		/* Check our ids match any regs they're supposed to */
-		return check_ids(rold->id, rcur->id, idmap);
 	case PTR_TO_PACKET_META:
 	case PTR_TO_PACKET:
 		if (rcur->type != rold->type)
@@ -10691,11 +10650,8 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 	case PTR_TO_PACKET_END:
 	case PTR_TO_FLOW_KEYS:
 	case PTR_TO_SOCKET:
-	case PTR_TO_SOCKET_OR_NULL:
 	case PTR_TO_SOCK_COMMON:
-	case PTR_TO_SOCK_COMMON_OR_NULL:
 	case PTR_TO_TCP_SOCK:
-	case PTR_TO_TCP_SOCK_OR_NULL:
 	case PTR_TO_XDP_SOCK:
 		/* Only valid matches are exact, which memcmp() above
 		 * would have accepted
@@ -11221,17 +11177,13 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 /* Return true if it's OK to have the same insn return a different type. */
 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
 {
-	switch (type) {
+	switch (base_type(type)) {
 	case PTR_TO_CTX:
 	case PTR_TO_SOCKET:
-	case PTR_TO_SOCKET_OR_NULL:
 	case PTR_TO_SOCK_COMMON:
-	case PTR_TO_SOCK_COMMON_OR_NULL:
 	case PTR_TO_TCP_SOCK:
-	case PTR_TO_TCP_SOCK_OR_NULL:
 	case PTR_TO_XDP_SOCK:
 	case PTR_TO_BTF_ID:
-	case PTR_TO_BTF_ID_OR_NULL:
 		return false;
 	default:
 		return true;
@@ -11457,7 +11409,7 @@ static int do_check(struct bpf_verifier_env *env)
 			if (is_ctx_reg(env, insn->dst_reg)) {
 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
 					insn->dst_reg,
-					reg_type_str[reg_state(env, insn->dst_reg)->type]);
+					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
 				return -EACCES;
 			}
......
@@ -929,7 +929,7 @@ static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
 		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
 		  PTR_TO_BTF_ID_OR_NULL },
 		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
-		  PTR_TO_RDWR_BUF_OR_NULL },
+		  PTR_TO_RDWR_BUF | PTR_MAYBE_NULL },
 	},
 	.seq_info	= &iter_seq_info,
 };
......
@@ -1564,7 +1564,7 @@ static struct bpf_iter_reg sock_map_iter_reg = {
 	.ctx_arg_info_size	= 2,
 	.ctx_arg_info		= {
 		{ offsetof(struct bpf_iter__sockmap, key),
-		  PTR_TO_RDONLY_BUF_OR_NULL },
+		  PTR_TO_RDONLY_BUF | PTR_MAYBE_NULL },
 		{ offsetof(struct bpf_iter__sockmap, sk),
 		  PTR_TO_BTF_ID_OR_NULL },
 	},
......