Commit 354e8f19 authored by Martin KaFai Lau, committed by Alexei Starovoitov

bpf: Support <8-byte scalar spill and refill

The verifier currently does not save the reg state when
spilling a <8-byte bounded scalar to the stack.  The bpf program
will be incorrectly rejected when this scalar is refilled to
the reg and then used to offset into a packet header.
A later patch has a simplified bpf prog from a real use case
to demonstrate this case.  The current workaround is
to reparse the packet such that this offset scalar
is close to where the packet data will be accessed, in order
to avoid the spill.  Thus, the header is parsed twice.
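
For illustration, a minimal sketch (not the actual selftest; the
program shape and field names here are hypothetical) of the kind of
program that hits this rejection:

    /* Hypothetical XDP program: a <8-byte bounded scalar (hdr_len) is
     * spilled by the compiler and later refilled to offset into the
     * packet.  Before this patch, the refill lost the [0, 15] bounds
     * and the packet access below was rejected.
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int parse_hdr(struct xdp_md *ctx)
    {
            void *data = (void *)(long)ctx->data;
            void *data_end = (void *)(long)ctx->data_end;
            __u32 hdr_len;

            if (data + 1 > data_end)
                    return XDP_DROP;
            hdr_len = (*(__u8 *)data) & 0x0f;  /* bounded to [0, 15] */

            /* ... enough other work here for the compiler to spill
             * hdr_len to the stack and refill it later ...
             */

            if (data + hdr_len + 4 > data_end) /* refilled hdr_len as offset */
                    return XDP_DROP;
            return *(__u32 *)(data + hdr_len) ? XDP_PASS : XDP_DROP;
    }

    char _license[] SEC("license") = "GPL";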

The llvm patch [1] will align the <8-byte spill to
an 8-byte aligned stack address.  This simplifies the verifier
support by avoiding the need to store multiple reg states for
each 8-byte stack slot.

This patch changes the verifier to save the reg state when
spilling a <8-byte scalar to the stack.  This reg state saving
is limited to spills aligned to an 8-byte stack address.
Since the current refill logic already calls coerce_reg_to_size(),
coerce_reg_to_size() is not called on state->stack[spi].spilled_ptr
during spill.
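
As a userspace-only sketch (BPF_REG_SIZE assumed to mirror the kernel
definition) of which spill offsets qualify for reg state saving under
the new !(off % BPF_REG_SIZE) test, provided the scalar is also
bounded and non-null:

    #include <stdio.h>

    #define BPF_REG_SIZE 8  /* assumed: mirrors the kernel definition */

    int main(void)
    {
            int offs[] = { -8, -4, -16, -12 };

            for (unsigned int i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
                    printf("spill at off %3d: %s\n", offs[i],
                           !(offs[i] % BPF_REG_SIZE) ?
                           "reg state saved" : "falls back to STACK_MISC");
            return 0;
    }

This prints "reg state saved" for -8 and -16 and "falls back to
STACK_MISC" for -4 and -12: the decision keys on alignment of the
spill offset, not on the spill size.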

When refilling in check_stack_read_fixed_off(), it checks that
the refill size is the same as the number of bytes marked with
STACK_SPILL before restoring the reg state.  When restoring
the reg state to state->regs[dst_regno], it needs
to avoid state->regs[dst_regno].subreg_def being
overwritten, because it has already been set by the earlier
check_reg_arg() [check_mem_access() is called after check_reg_arg() in
do_check()].  Reordering check_mem_access() and check_reg_arg()
would require a lot of changes to test_verifier's tests because
of differences in the verifier's error messages.  Thus, this
patch saves state->regs[dst_regno].subreg_def
first in check_stack_read_fixed_off().

There are cases where the verifier needs to scrub the spilled slot
from STACK_SPILL to STACK_MISC.  After this patch, a spill is no longer
always 8 bytes, so the verifier can no longer assume the other 7 bytes are
always marked as STACK_SPILL.  In particular, the scrub must avoid changing
an uninitialized byte from STACK_INVALID to STACK_MISC.  Otherwise, the
verifier would incorrectly accept a bpf program reading uninitialized bytes
from the stack.  A new helper, scrub_spilled_slot(), is created for this
purpose.
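
A standalone userspace sketch (enum values assumed, mirroring the
kernel's slot-marking logic) showing how a 4-byte spill marks the
slot and why scrubbing must leave STACK_INVALID bytes untouched:

    #include <stdio.h>

    #define BPF_REG_SIZE 8
    enum { STACK_INVALID, STACK_SPILL, STACK_MISC };  /* assumed values */

    static void scrub_spilled_slot(unsigned char *stype)
    {
            if (*stype != STACK_INVALID)
                    *stype = STACK_MISC;
    }

    int main(void)
    {
            /* a fresh stack slot: every byte starts as STACK_INVALID */
            unsigned char slot_type[BPF_REG_SIZE] = { STACK_INVALID };
            int size = 4, i;

            /* mirror save_register_state(): mark the spilled bytes ... */
            for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
                    slot_type[i - 1] = STACK_SPILL;
            /* ... and scrub the rest without inventing STACK_MISC bytes */
            for (; i; i--)
                    scrub_spilled_slot(&slot_type[i - 1]);

            for (i = 0; i < BPF_REG_SIZE; i++)
                    printf("byte %d: %s\n", i,
                           slot_type[i] == STACK_SPILL ? "STACK_SPILL" :
                           slot_type[i] == STACK_MISC ? "STACK_MISC" :
                           "STACK_INVALID");
            /* bytes 0-3 stay STACK_INVALID; bytes 4-7 become STACK_SPILL */
            return 0;
    }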

[1]: https://reviews.llvm.org/D109073

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210922004941.625398-1-kafai@fb.com
parent 27113c59
@@ -620,6 +620,12 @@ static bool is_spilled_reg(const struct bpf_stack_state *stack)
 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
 }
 
+static void scrub_spilled_slot(u8 *stype)
+{
+	if (*stype != STACK_INVALID)
+		*stype = STACK_MISC;
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
 				 const struct bpf_func_state *state)
 {
@@ -2634,15 +2640,21 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
 }
 
 static void save_register_state(struct bpf_func_state *state,
-				int spi, struct bpf_reg_state *reg)
+				int spi, struct bpf_reg_state *reg,
+				int size)
 {
 	int i;
 
 	state->stack[spi].spilled_ptr = *reg;
-	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	if (size == BPF_REG_SIZE)
+		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
-	for (i = 0; i < BPF_REG_SIZE; i++)
-		state->stack[spi].slot_type[i] = STACK_SPILL;
+	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
+		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
+
+	/* size < 8 bytes spill */
+	for (; i; i--)
+		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
 }
 
 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
@@ -2689,7 +2701,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
 	}
 
-	if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
+	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
 	    !register_is_null(reg) && env->bpf_capable) {
 		if (dst_reg != BPF_REG_FP) {
 			/* The backtracking logic can only recognize explicit
@@ -2702,7 +2714,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			if (err)
 				return err;
 		}
-		save_register_state(state, spi, reg);
+		save_register_state(state, spi, reg, size);
 	} else if (reg && is_spillable_regtype(reg->type)) {
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -2714,7 +2726,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
 			return -EINVAL;
 		}
-		save_register_state(state, spi, reg);
+		save_register_state(state, spi, reg, size);
 	} else {
 		u8 type = STACK_MISC;
 
@@ -2723,7 +2735,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
 		if (is_spilled_reg(&state->stack[spi]))
 			for (i = 0; i < BPF_REG_SIZE; i++)
-				state->stack[spi].slot_type[i] = STACK_MISC;
+				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
 
 	/* only mark the slot as written if all 8 bytes were written
 	 * otherwise read propagation may incorrectly stop too soon
@@ -2926,23 +2938,50 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
 	struct bpf_reg_state *reg;
-	u8 *stype;
+	u8 *stype, type;
 
 	stype = reg_state->stack[spi].slot_type;
 	reg = &reg_state->stack[spi].spilled_ptr;
 
 	if (is_spilled_reg(&reg_state->stack[spi])) {
 		if (size != BPF_REG_SIZE) {
+			u8 scalar_size = 0;
+
 			if (reg->type != SCALAR_VALUE) {
 				verbose_linfo(env, env->insn_idx, "; ");
 				verbose(env, "invalid size of register fill\n");
 				return -EACCES;
 			}
-			if (dst_regno >= 0) {
+
+			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+			if (dst_regno < 0)
+				return 0;
+
+			for (i = BPF_REG_SIZE; i > 0 && stype[i - 1] == STACK_SPILL; i--)
+				scalar_size++;
+
+			if (!(off % BPF_REG_SIZE) && size == scalar_size) {
+				/* The earlier check_reg_arg() has decided the
+				 * subreg_def for this insn.  Save it first.
+				 */
+				s32 subreg_def = state->regs[dst_regno].subreg_def;
+
+				state->regs[dst_regno] = *reg;
+				state->regs[dst_regno].subreg_def = subreg_def;
+			} else {
+				for (i = 0; i < size; i++) {
+					type = stype[(slot - i) % BPF_REG_SIZE];
+					if (type == STACK_SPILL)
+						continue;
+					if (type == STACK_MISC)
+						continue;
+					verbose(env, "invalid read from stack off %d+%d size %d\n",
						off, i, size);
+					return -EACCES;
+				}
 				mark_reg_unknown(env, state->regs, dst_regno);
-				state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
 			}
-			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
 			return 0;
 		}
 		for (i = 1; i < BPF_REG_SIZE; i++) {
@@ -2973,8 +3012,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 		}
 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
 	} else {
-		u8 type;
-
 		for (i = 0; i < size; i++) {
 			type = stype[(slot - i) % BPF_REG_SIZE];
 			if (type == STACK_MISC)
@@ -4532,7 +4569,7 @@ static int check_stack_range_initialized(
 		if (clobber) {
 			__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
 			for (j = 0; j < BPF_REG_SIZE; j++)
-				state->stack[spi].slot_type[j] = STACK_MISC;
+				scrub_spilled_slot(&state->stack[spi].slot_type[j]);
 		}
 		goto mark;
 	}