Commit 1682d8df authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2021-05-04

The following pull-request contains BPF updates for your *net* tree.

We've added 5 non-merge commits during the last 4 day(s) which contain
a total of 6 files changed, 52 insertions(+), 30 deletions(-).

The main changes are:

1) Fix libbpf overflow when processing BPF ring buffer in case of extreme
   application behavior, from Brendan Jackman.

2) Fix potential data leakage of uninitialized BPF stack under speculative
   execution, from Daniel Borkmann.

3) Fix off-by-one when validating xsk pool chunks, from Xuan Zhuo.

4) Fix snprintf BPF selftest with a pid filter to avoid racing its output
   test buffer, from Florent Revest.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd1af6b5 ac31565c
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -302,10 +302,11 @@ struct bpf_verifier_state_list {
 };
 
 /* Possible states for alu_state member. */
-#define BPF_ALU_SANITIZE_SRC	1U
-#define BPF_ALU_SANITIZE_DST	2U
-#define BPF_ALU_NEG_VALUE	4U
-#define BPF_ALU_NON_POINTER	8U
+#define BPF_ALU_SANITIZE_SRC	(1U << 0)
+#define BPF_ALU_SANITIZE_DST	(1U << 1)
+#define BPF_ALU_NEG_VALUE	(1U << 2)
+#define BPF_ALU_NON_POINTER	(1U << 3)
+#define BPF_ALU_IMMEDIATE	(1U << 4)
 #define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
 				 BPF_ALU_SANITIZE_DST)
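For context on the new flag: alu_state is a small per-instruction bitfield kept in struct bpf_insn_aux_data. sanitize_ptr_alu() encodes what it learned about a pointer-arithmetic instruction, and do_misc_fixups() decodes it later to pick the rewrite. A minimal standalone C sketch of that round trip (the main() harness is illustrative; the flag tests mirror the kernel code above):

#include <stdio.h>

#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | BPF_ALU_SANITIZE_DST)

int main(void)
{
	/* Encode, as sanitize_ptr_alu() does: negative offset, offset is a
	 * verifier-known constant, pointer sits in the dst register (so the
	 * src/offset register is the one to sanitize). */
	unsigned int alu_state = BPF_ALU_NEG_VALUE | BPF_ALU_IMMEDIATE |
				 BPF_ALU_SANITIZE_SRC;

	/* Decode, as do_misc_fixups() does. */
	int isneg = !!(alu_state & BPF_ALU_NEG_VALUE);
	int issrc = (alu_state & BPF_ALU_SANITIZE) == BPF_ALU_SANITIZE_SRC;
	int isimm = !!(alu_state & BPF_ALU_IMMEDIATE);

	printf("isneg=%d issrc=%d isimm=%d\n", isneg, issrc, isimm); /* 1 1 1 */
	return 0;
}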
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6496,6 +6496,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 {
 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
 	struct bpf_verifier_state *vstate = env->cur_state;
+	bool off_is_imm = tnum_is_const(off_reg->var_off);
 	bool off_is_neg = off_reg->smin_value < 0;
 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
 	u8 opcode = BPF_OP(insn->code);
@@ -6526,6 +6527,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		alu_limit = abs(tmp_aux->alu_limit - alu_limit);
 	} else {
 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
 		alu_state |= ptr_is_dst_reg ?
 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
 	}
@@ -12371,7 +12373,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
 			struct bpf_insn *patch = &insn_buf[0];
-			bool issrc, isneg;
+			bool issrc, isneg, isimm;
 			u32 off_reg;
 
 			aux = &env->insn_aux_data[i + delta];
@@ -12382,28 +12384,29 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
 				BPF_ALU_SANITIZE_SRC;
+			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
 
 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
-			if (isneg)
-				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
-			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
-			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
-			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
-			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
-			if (issrc) {
-				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
-							 off_reg);
-				insn->src_reg = BPF_REG_AX;
+			if (isimm) {
+				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
 			} else {
-				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
-							 BPF_REG_AX);
+				if (isneg)
+					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
 			}
+			if (!issrc)
+				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
+			insn->src_reg = BPF_REG_AX;
 			if (isneg)
 				insn->code = insn->code == code_add ?
 					     code_sub : code_add;
 			*patch++ = *insn;
-			if (issrc && isneg)
+			if (issrc && isneg && !isimm)
 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
 			cnt = patch - insn_buf;
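What the rewrite does at runtime: for a variable offset it builds a mask that is all-ones when the offset lies within aux->alu_limit and all-zeroes otherwise, then ANDs the offset with it; on the new BPF_ALU_IMMEDIATE path a verifier-known constant offset is simply materialized in BPF_REG_AX from alu_limit, so none of the masking arithmetic (including the final multiply on the offset register) is emitted. A hedged userspace model of the sequence (sanitize_off() and its harness are illustrative, not kernel code; alu_limit is assumed to hold the constant's magnitude on the immediate path, and arithmetic right shift is assumed, as in the BPF ISA):

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the verifier-emitted sanitation; reg_ax stands in for
 * BPF_REG_AX. */
static uint64_t sanitize_off(uint32_t alu_limit, uint64_t off, int off_is_imm)
{
	uint64_t reg_ax;

	if (off_is_imm) {
		/* BPF_ALU_IMMEDIATE: the offset is a verifier-known constant
		 * and alu_limit holds its magnitude, so AX is loaded directly
		 * and no masking arithmetic is emitted. */
		return alu_limit;
	}
	reg_ax = alu_limit;      /* BPF_MOV32_IMM(AX, alu_limit)           */
	reg_ax -= off;           /* BPF_SUB: sign bit set iff off > limit  */
	reg_ax |= off;           /* BPF_OR:  sign bit set iff off negative */
	reg_ax = -reg_ax;        /* BPF_NEG                                */
	reg_ax = (uint64_t)((int64_t)reg_ax >> 63); /* BPF_ARSH 63: 0 or ~0 */
	return reg_ax & off;     /* BPF_AND: off if in range, else 0       */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)sanitize_off(16, 8, 0));  /* 8: in range      */
	printf("%llu\n", (unsigned long long)sanitize_off(16, 32, 0)); /* 0: out of range  */
	printf("%llu\n", (unsigned long long)sanitize_off(8, 8, 1));   /* 8: constant path */
	return 0;
}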
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
 static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
 					    struct xdp_desc *desc)
 {
-	u64 chunk, chunk_end;
+	u64 chunk;
 
-	chunk = xp_aligned_extract_addr(pool, desc->addr);
-	chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
-	if (chunk != chunk_end)
+	if (desc->len > pool->chunk_size)
 		return false;
 
+	chunk = xp_aligned_extract_addr(pool, desc->addr);
 	if (chunk >= pool->addrs_cnt)
 		return false;
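The dropped check compared the chunk of desc->addr against the chunk of desc->addr + desc->len, so a descriptor that exactly fills its chunk (addr + len landing on the next chunk boundary) was wrongly rejected. A small worked example of the old versus new predicate (extract_chunk() is an illustrative stand-in for xp_aligned_extract_addr(), which for an aligned pool rounds the address down to a chunk boundary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: round an address down to its chunk boundary,
 * assuming a power-of-two chunk size. */
static uint64_t extract_chunk(uint64_t addr, uint64_t chunk_size)
{
	return addr & ~(chunk_size - 1);
}

int main(void)
{
	uint64_t chunk_size = 2048, addr = 2048, len = 2048;

	/* Old check: addr + len == 4096 extracts to the next chunk, so a
	 * perfectly valid full-chunk descriptor was rejected. */
	bool old_reject = extract_chunk(addr, chunk_size) !=
			  extract_chunk(addr + len, chunk_size);

	/* New check: only reject when the length exceeds the chunk size. */
	bool new_reject = len > chunk_size;

	printf("old_reject=%d new_reject=%d\n", old_reject, new_reject); /* 1 0 */
	return 0;
}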
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
 	return (len + 7) / 8 * 8;
 }
 
-static int ringbuf_process_ring(struct ring* r)
+static int64_t ringbuf_process_ring(struct ring* r)
 {
-	int *len_ptr, len, err, cnt = 0;
+	int *len_ptr, len, err;
+	/* 64-bit to avoid overflow in case of extreme application behavior */
+	int64_t cnt = 0;
 	unsigned long cons_pos, prod_pos;
 	bool got_new_data;
 	void *sample;
@@ -244,12 +246,14 @@ static int ringbuf_process_ring(struct ring* r)
 }
 
 /* Consume available ring buffer(s) data without event polling.
- * Returns number of records consumed across all registered ring buffers, or
- * negative number if any of the callbacks return error.
+ * Returns number of records consumed across all registered ring buffers (or
+ * INT_MAX, whichever is less), or negative number if any of the callbacks
+ * return error.
  */
 int ring_buffer__consume(struct ring_buffer *rb)
 {
-	int i, err, res = 0;
+	int64_t err, res = 0;
+	int i;
 
 	for (i = 0; i < rb->ring_cnt; i++) {
 		struct ring *ring = &rb->rings[i];
@@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
 			return err;
 		res += err;
 	}
+	if (res > INT_MAX)
+		return INT_MAX;
 	return res;
 }
 
 /* Poll for available data and consume records, if any are available.
- * Returns number of records consumed, or negative number, if any of the
- * registered callbacks returned error.
+ * Returns number of records consumed (or INT_MAX, whichever is less), or
+ * negative number, if any of the registered callbacks returned error.
  */
 int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 {
-	int i, cnt, err, res = 0;
+	int i, cnt;
+	int64_t err, res = 0;
 
 	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
+	if (cnt < 0)
+		return -errno;
+
 	for (i = 0; i < cnt; i++) {
 		__u32 ring_id = rb->events[i].data.fd;
 		struct ring *ring = &rb->rings[ring_id];
@@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 			return err;
 		res += err;
 	}
-	return cnt < 0 ? -errno : res;
+	if (res > INT_MAX)
+		return INT_MAX;
+	return res;
 }
 
 /* Get an fd that can be used to sleep until data is available in the ring(s) */
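For callers the API shape is unchanged: both entry points still return int, but per-ring counts are now accumulated in 64 bits and the result is clamped to INT_MAX instead of wrapping negative. A hedged usage sketch (drain_ring(), handle_event() and the error handling are illustrative, not from the patch):

#include <bpf/libbpf.h>
#include <stdio.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	/* A negative return here propagates out as the (negative) result. */
	return 0;
}

int drain_ring(int map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	/* Blocks up to 100 ms; returns records consumed, clamped to INT_MAX
	 * even if the 64-bit internal count exceeds it. */
	n = ring_buffer__poll(rb, 100);
	if (n < 0)
		fprintf(stderr, "poll failed: %d\n", n);

	ring_buffer__free(rb);
	return n;
}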
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -43,6 +43,8 @@ void test_snprintf_positive(void)
 	if (!ASSERT_OK_PTR(skel, "skel_open"))
 		return;
 
+	skel->bss->pid = getpid();
+
 	if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach"))
 		goto cleanup;
--- a/tools/testing/selftests/bpf/progs/test_snprintf.c
+++ b/tools/testing/selftests/bpf/progs/test_snprintf.c
@@ -5,6 +5,8 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
+__u32 pid = 0;
+
 char num_out[64] = {};
 long num_ret = 0;
@@ -42,6 +44,9 @@ int handler(const void *ctx)
 	static const char str1[] = "str1";
 	static const char longstr[] = "longstr";
 
+	if ((int)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
 	/* Integer types */
 	num_ret = BPF_SNPRINTF(num_out, sizeof(num_out),
 			       "%d %u %x %li %llu %lX",