Commit e7d5184b authored by Alexei Starovoitov

Merge branch 'bpf: Support <8-byte scalar spill and refill'

Martin KaFai Lau says:

====================

The verifier currently does not save the reg state when
spilling a <8-byte bounded scalar to the stack.  The bpf program
will be incorrectly rejected when this scalar is refilled into
a reg and then used to offset into a packet header.
A later patch in this set has a simplified bpf prog from a real
use case to demonstrate this.  The current workaround is
to reparse the packet such that this offset scalar
is close to where the packet data will be accessed, to
avoid the spill.  Thus, the header is parsed twice.

The llvm patch [1] aligns a <8-byte spill to
an 8-byte stack address.  This set makes the necessary
verifier changes to support <8-byte scalar spill and refill.

[1] https://reviews.llvm.org/D109073
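
As an illustration only (not part of this set), the rejected pattern looks
roughly like the XDP fragment below; whether clang actually spills the
bounded offset depends on register pressure, and the function name here is
just for the sketch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int refill_offset_example(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	/* bounded scalar (umax=63); clang may keep it in a 4-byte stack
	 * slot and refill it right before the packet access below
	 */
	__u32 off = ctx->rx_queue_index & 0x3f;

	/* ... enough other work here to force a spill of 'off' ... */

	if (data + off + sizeof(__u32) > data_end)
		return XDP_DROP;
	/* before this set: the refilled 'off' lost its bounds and this
	 * packet access was rejected
	 */
	return *(__u32 *)(data + off) ? XDP_PASS : XDP_DROP;
}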

v2:
- Changed the xdpwall selftest in patch 3 to trigger a u32
  spill at a non 8-byte aligned stack address.  v1 had
  simplified the real example too much, such that it only
  triggered a u32 spill but did not spill at a non
  8-byte aligned stack address.
- Changed README.rst in patch 3 to explain the llvm dependency
  for the xdpwall test.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 091037fb ef979017
@@ -612,6 +612,20 @@ static const char *kernel_type_name(const struct btf* btf, u32 id)
return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
/* The reg state of a pointer or a bounded scalar was saved when
* it was spilled to the stack.
*/
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}
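/* Mark one slot byte as STACK_MISC unless it has never been written
 * (STACK_INVALID), so later reads treat it as initialized but unknown data.
 */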
static void scrub_spilled_slot(u8 *stype)
{
if (*stype != STACK_INVALID)
*stype = STACK_MISC;
}
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state)
{
@@ -717,7 +731,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
if (state->stack[i].slot_type[0] == STACK_SPILL) {
if (is_spilled_reg(&state->stack[i])) {
reg = &state->stack[i].spilled_ptr;
t = reg->type;
verbose(env, "=%s", reg_type_str[t]);
@@ -2373,7 +2387,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
reg->precise = true;
}
for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
if (func->stack[j].slot_type[0] != STACK_SPILL)
if (!is_spilled_reg(&func->stack[j]))
continue;
reg = &func->stack[j].spilled_ptr;
if (reg->type != SCALAR_VALUE)
@@ -2415,7 +2429,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
}
while (spi >= 0) {
if (func->stack[spi].slot_type[0] != STACK_SPILL) {
if (!is_spilled_reg(&func->stack[spi])) {
stack_mask = 0;
break;
}
@@ -2514,7 +2528,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
return 0;
}
if (func->stack[i].slot_type[0] != STACK_SPILL) {
if (!is_spilled_reg(&func->stack[i])) {
stack_mask &= ~(1ull << i);
continue;
}
@@ -2626,15 +2640,21 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
}
static void save_register_state(struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg)
int spi, struct bpf_reg_state *reg,
int size)
{
int i;
state->stack[spi].spilled_ptr = *reg;
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
if (size == BPF_REG_SIZE)
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
state->stack[spi].slot_type[i - 1] = STACK_SPILL;
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_SPILL;
/* size < 8 bytes spill */
for (; i; i--)
scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
}
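/* Example with BPF_REG_SIZE == 8: a 4-byte spill marks slot_type[4..7]
 * as STACK_SPILL and scrubs slot_type[0..3] (STACK_MISC, or untouched
 * STACK_INVALID), so is_spilled_reg() still sees STACK_SPILL in
 * slot_type[BPF_REG_SIZE - 1].
 */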
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
@@ -2681,7 +2701,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}
if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) {
/* The backtracking logic can only recognize explicit
@@ -2694,7 +2714,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
if (err)
return err;
}
save_register_state(state, spi, reg);
save_register_state(state, spi, reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -2706,16 +2726,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
save_register_state(state, spi, reg);
save_register_state(state, spi, reg, size);
} else {
u8 type = STACK_MISC;
/* regular write of data into stack destroys any spilled ptr */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
if (state->stack[spi].slot_type[0] == STACK_SPILL)
if (is_spilled_reg(&state->stack[spi]))
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_MISC;
scrub_spilled_slot(&state->stack[spi].slot_type[i]);
/* only mark the slot as written if all 8 bytes were written
* otherwise read propagation may incorrectly stop too soon
@@ -2918,23 +2938,50 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
struct bpf_reg_state *reg;
u8 *stype;
u8 *stype, type;
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
if (stype[0] == STACK_SPILL) {
if (is_spilled_reg(&reg_state->stack[spi])) {
if (size != BPF_REG_SIZE) {
u8 scalar_size = 0;
if (reg->type != SCALAR_VALUE) {
verbose_linfo(env, env->insn_idx, "; ");
verbose(env, "invalid size of register fill\n");
return -EACCES;
}
if (dst_regno >= 0) {
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
if (dst_regno < 0)
return 0;
for (i = BPF_REG_SIZE; i > 0 && stype[i - 1] == STACK_SPILL; i--)
scalar_size++;
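/* Two cases for a narrow fill of a spilled scalar: an exact-size fill
 * from an 8-byte aligned spill can reuse the saved reg state (including
 * its bounds); any other fill must only touch STACK_SPILL/STACK_MISC
 * bytes and produces an unknown scalar.
 */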
if (!(off % BPF_REG_SIZE) && size == scalar_size) {
/* The earlier check_reg_arg() has decided the
* subreg_def for this insn. Save it first.
*/
s32 subreg_def = state->regs[dst_regno].subreg_def;
state->regs[dst_regno] = *reg;
state->regs[dst_regno].subreg_def = subreg_def;
} else {
for (i = 0; i < size; i++) {
type = stype[(slot - i) % BPF_REG_SIZE];
if (type == STACK_SPILL)
continue;
if (type == STACK_MISC)
continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
mark_reg_unknown(env, state->regs, dst_regno);
state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
return 0;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
@@ -2965,8 +3012,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
u8 type;
for (i = 0; i < size; i++) {
type = stype[(slot - i) % BPF_REG_SIZE];
if (type == STACK_MISC)
@@ -4514,17 +4559,17 @@ static int check_stack_range_initialized(
goto mark;
}
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
if (is_spilled_reg(&state->stack[spi]) &&
state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
goto mark;
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
if (is_spilled_reg(&state->stack[spi]) &&
(state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
env->allow_ptr_leaks)) {
if (clobber) {
__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
state->stack[spi].slot_type[j] = STACK_MISC;
scrub_spilled_slot(&state->stack[spi].slot_type[j]);
}
goto mark;
}
@@ -10356,9 +10401,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
* return false to continue verification of this path
*/
return false;
if (i % BPF_REG_SIZE)
if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
if (!is_spilled_reg(&old->stack[spi]))
continue;
if (!regsafe(env, &old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr, idmap))
@@ -10565,7 +10610,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
if (!is_spilled_reg(&state->stack[i]))
continue;
state_reg = &state->stack[i].spilled_ptr;
if (state_reg->type != SCALAR_VALUE ||
@@ -242,3 +242,16 @@ To fix this issue, user newer libbpf.
.. Links
.. _clang reloc patch: https://reviews.llvm.org/D102712
.. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
Clang dependencies for the u32 spill test (xdpwall)
===================================================
The xdpwall selftest requires a change in `Clang 14`__.

Without it, the xdpwall selftest will fail and the error message
from running test_progs will look like:

.. code-block:: console

  test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007

__ https://reviews.llvm.org/D109073
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "test_progs.h"
#include "xdpwall.skel.h"
void test_xdpwall(void)
{
struct xdpwall *skel;
skel = xdpwall__open_and_load();
ASSERT_OK_PTR(skel, "Does LLVM have https://reviews.llvm.org/D109073?");
xdpwall__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <stdbool.h>
#include <stdint.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
enum pkt_parse_err {
NO_ERR,
BAD_IP6_HDR,
BAD_IP4GUE_HDR,
BAD_IP6GUE_HDR,
};
enum pkt_flag {
TUNNEL = 0x1,
TCP_SYN = 0x2,
QUIC_INITIAL_FLAG = 0x4,
TCP_ACK = 0x8,
TCP_RST = 0x10
};
struct v4_lpm_key {
__u32 prefixlen;
__u32 src;
};
struct v4_lpm_val {
struct v4_lpm_key key;
__u8 val;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 16);
__type(key, struct in6_addr);
__type(value, bool);
} v6_addr_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 16);
__type(key, __u32);
__type(value, bool);
} v4_addr_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LPM_TRIE);
__uint(max_entries, 16);
__uint(key_size, sizeof(struct v4_lpm_key));
__uint(value_size, sizeof(struct v4_lpm_val));
__uint(map_flags, BPF_F_NO_PREALLOC);
} v4_lpm_val_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16);
__type(key, int);
__type(value, __u8);
} tcp_port_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16);
__type(key, int);
__type(value, __u16);
} udp_port_map SEC(".maps");
enum ip_type { V4 = 1, V6 = 2 };
struct fw_match_info {
__u8 v4_src_ip_match;
__u8 v6_src_ip_match;
__u8 v4_src_prefix_match;
__u8 v4_dst_prefix_match;
__u8 tcp_dp_match;
__u16 udp_sp_match;
__u16 udp_dp_match;
bool is_tcp;
bool is_tcp_syn;
};
struct pkt_info {
enum ip_type type;
union {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
} ip;
int sport;
int dport;
__u16 trans_hdr_offset;
__u8 proto;
__u8 flags;
};
static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end)
{
struct ethhdr *eth = data;
if (eth + 1 > data_end)
return NULL;
return eth;
}
static __always_inline __u8 filter_ipv6_addr(const struct in6_addr *ipv6addr)
{
__u8 *leaf;
leaf = bpf_map_lookup_elem(&v6_addr_map, ipv6addr);
return leaf ? *leaf : 0;
}
static __always_inline __u8 filter_ipv4_addr(const __u32 ipaddr)
{
__u8 *leaf;
leaf = bpf_map_lookup_elem(&v4_addr_map, &ipaddr);
return leaf ? *leaf : 0;
}
static __always_inline __u8 filter_ipv4_lpm(const __u32 ipaddr)
{
struct v4_lpm_key v4_key = {};
struct v4_lpm_val *lpm_val;
v4_key.src = ipaddr;
v4_key.prefixlen = 32;
lpm_val = bpf_map_lookup_elem(&v4_lpm_val_map, &v4_key);
return lpm_val ? lpm_val->val : 0;
}
static __always_inline void
filter_src_dst_ip(struct pkt_info* info, struct fw_match_info* match_info)
{
if (info->type == V6) {
match_info->v6_src_ip_match =
filter_ipv6_addr(&info->ip.ipv6->saddr);
} else if (info->type == V4) {
match_info->v4_src_ip_match =
filter_ipv4_addr(info->ip.ipv4->saddr);
match_info->v4_src_prefix_match =
filter_ipv4_lpm(info->ip.ipv4->saddr);
match_info->v4_dst_prefix_match =
filter_ipv4_lpm(info->ip.ipv4->daddr);
}
}
static __always_inline void *
get_transport_hdr(__u16 offset, void *data, void *data_end)
{
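/* offset is capped at 255, keeping it a small bounded scalar when it is
 * later added to the packet pointer; callers still bounds-check the
 * specific header they parse against data_end.
 */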
if (offset > 255 || data + offset > data_end)
return NULL;
return data + offset;
}
static __always_inline bool tcphdr_only_contains_flag(struct tcphdr *tcp,
__u32 FLAG)
{
return (tcp_flag_word(tcp) &
(TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)) == FLAG;
}
static __always_inline void set_tcp_flags(struct pkt_info *info,
struct tcphdr *tcp) {
if (tcphdr_only_contains_flag(tcp, TCP_FLAG_SYN))
info->flags |= TCP_SYN;
else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_ACK))
info->flags |= TCP_ACK;
else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_RST))
info->flags |= TCP_RST;
}
static __always_inline bool
parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end)
{
struct tcphdr *tcp = transport_hdr;
if (tcp + 1 > data_end)
return false;
info->sport = bpf_ntohs(tcp->source);
info->dport = bpf_ntohs(tcp->dest);
set_tcp_flags(info, tcp);
return true;
}
static __always_inline bool
parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end)
{
struct udphdr *udp = transport_hdr;
if (udp + 1 > data_end)
return false;
info->sport = bpf_ntohs(udp->source);
info->dport = bpf_ntohs(udp->dest);
return true;
}
static __always_inline __u8 filter_tcp_port(int port)
{
__u8 *leaf = bpf_map_lookup_elem(&tcp_port_map, &port);
return leaf ? *leaf : 0;
}
static __always_inline __u16 filter_udp_port(int port)
{
__u16 *leaf = bpf_map_lookup_elem(&udp_port_map, &port);
return leaf ? *leaf : 0;
}
static __always_inline bool
filter_transport_hdr(void *transport_hdr, void *data_end,
struct pkt_info *info, struct fw_match_info *match_info)
{
if (info->proto == IPPROTO_TCP) {
if (!parse_tcp(info, transport_hdr, data_end))
return false;
match_info->is_tcp = true;
match_info->is_tcp_syn = (info->flags & TCP_SYN) > 0;
match_info->tcp_dp_match = filter_tcp_port(info->dport);
} else if (info->proto == IPPROTO_UDP) {
if (!parse_udp(info, transport_hdr, data_end))
return false;
match_info->udp_dp_match = filter_udp_port(info->dport);
match_info->udp_sp_match = filter_udp_port(info->sport);
}
return true;
}
static __always_inline __u8
parse_gue_v6(struct pkt_info *info, struct ipv6hdr *ip6h, void *data_end)
{
struct udphdr *udp = (struct udphdr *)(ip6h + 1);
void *encap_data = udp + 1;
if (udp + 1 > data_end)
return BAD_IP6_HDR;
if (udp->dest != bpf_htons(6666))
return NO_ERR;
info->flags |= TUNNEL;
if (encap_data + 1 > data_end)
return BAD_IP6GUE_HDR;
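/* The IP version is the high nibble of the first encapsulated byte:
 * 0x6? & 0x30 is non-zero for IPv6, 0x4? & 0x30 is zero for IPv4.
 */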
if (*(__u8 *)encap_data & 0x30) {
struct ipv6hdr *inner_ip6h = encap_data;
if (inner_ip6h + 1 > data_end)
return BAD_IP6GUE_HDR;
info->type = V6;
info->proto = inner_ip6h->nexthdr;
info->ip.ipv6 = inner_ip6h;
info->trans_hdr_offset += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
} else {
struct iphdr *inner_ip4h = encap_data;
if (inner_ip4h + 1 > data_end)
return BAD_IP6GUE_HDR;
info->type = V4;
info->proto = inner_ip4h->protocol;
info->ip.ipv4 = inner_ip4h;
info->trans_hdr_offset += sizeof(struct iphdr) + sizeof(struct udphdr);
}
return NO_ERR;
}
static __always_inline __u8 parse_ipv6_gue(struct pkt_info *info,
void *data, void *data_end)
{
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
if (ip6h + 1 > data_end)
return BAD_IP6_HDR;
info->proto = ip6h->nexthdr;
info->ip.ipv6 = ip6h;
info->type = V6;
info->trans_hdr_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
if (info->proto == IPPROTO_UDP)
return parse_gue_v6(info, ip6h, data_end);
return NO_ERR;
}
SEC("xdp")
int edgewall(struct xdp_md *ctx)
{
void *data_end = (void *)(long)(ctx->data_end);
void *data = (void *)(long)(ctx->data);
struct fw_match_info match_info = {};
struct pkt_info info = {};
__u8 parse_err = NO_ERR;
void *transport_hdr;
struct ethhdr *eth;
bool filter_res;
__u32 proto;
eth = parse_ethhdr(data, data_end);
if (!eth)
return XDP_DROP;
proto = eth->h_proto;
if (proto != bpf_htons(ETH_P_IPV6))
return XDP_DROP;
if (parse_ipv6_gue(&info, data, data_end))
return XDP_DROP;
if (info.proto == IPPROTO_ICMPV6)
return XDP_PASS;
if (info.proto != IPPROTO_TCP && info.proto != IPPROTO_UDP)
return XDP_DROP;
filter_src_dst_ip(&info, &match_info);
transport_hdr = get_transport_hdr(info.trans_hdr_offset, data,
data_end);
if (!transport_hdr)
return XDP_DROP;
filter_res = filter_transport_hdr(transport_hdr, data_end,
&info, &match_info);
if (!filter_res)
return XDP_DROP;
if (match_info.is_tcp && !match_info.is_tcp_syn)
return XDP_PASS;
return XDP_DROP;
}
char LICENSE[] SEC("license") = "GPL";
@@ -104,3 +104,164 @@
.result = ACCEPT,
.retval = POINTER_VALUE,
},
{
"Spill and refill a u32 const scalar. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
/* r4 = 20 */
BPF_MOV32_IMM(BPF_REG_4, 20),
/* *(u32 *)(r10 -8) = r4 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = *(u32 *)(r10 -8) */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"Spill a u32 const, refill from another half of the uninit u32 from the stack",
.insns = {
/* r4 = 20 */
BPF_MOV32_IMM(BPF_REG_4, 20),
/* *(u32 *)(r10 -8) = r4 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack off -4+0 size 4",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"Spill a u32 const scalar. Refill as u16. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
/* r4 = 20 */
BPF_MOV32_IMM(BPF_REG_4, 20),
/* *(u32 *)(r10 -8) = r4 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = *(u16 *)(r10 -8) */
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
/* r4 = 20 */
BPF_MOV32_IMM(BPF_REG_4, 20),
/* *(u32 *)(r10 -8) = r4 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = *(u16 *)(r10 -6) */
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
/* r4 = 20 */
BPF_MOV32_IMM(BPF_REG_4, 20),
/* *(u32 *)(r10 -8) = r4 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* *(u32 *)(r10 -4) = r4 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
/* r4 = *(u32 *)(r10 -4), */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"Spill and refill a umax=40 bounded scalar. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, tstamp)),
BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = *(u32 *)(r10 - 8) */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
/* r2 += r4 R2=pkt R4=inv,umax=40 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
/* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},