Commit 4c042a80 authored by David S. Miller

Merge branch 'bpf-misc-next'

Daniel Borkmann says:

====================
Misc BPF updates

This set cleans up ldimm64 leftovers from the early eBPF days and
adds a couple of related test cases to the verifier test suite.
It also cleans up the kallsyms spinlock (I had the same patch in
my queue as well) by relaxing it to the _bh variant. Finally, it
fixes up the htons/ntohs usage in test_progs and adds accessor
macros for the percpu tests in test_maps.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cd5487fb f3515b5d

--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -604,15 +604,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const struct bpf_insn insn1 = insn[1];
 		u64 imm64;
 
-		if (insn1.code != 0 || insn1.src_reg != 0 ||
-		    insn1.dst_reg != 0 || insn1.off != 0) {
-			/* Note: verifier in BPF core must catch invalid
-			 * instructions.
-			 */
-			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
-			return -EINVAL;
-		}
-
 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
 		emit_a64_mov_i64(dst, imm64, ctx);

--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -490,13 +490,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			break;
 
 		case BPF_LD | BPF_IMM | BPF_DW:
-			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
-			    insn[1].dst_reg != 0 || insn[1].off != 0) {
-				/* verifier must catch invalid insns */
-				pr_err("invalid BPF_LD_IMM64 insn\n");
-				return -EINVAL;
-			}
-
 			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
 			 * to save 7 bytes.
 			 */
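
Both removed blocks duplicated a check the verifier performs before a program can ever reach a JIT. As background for why the second slot must be all zeroes apart from imm: BPF_LD_IMM64 is the only 16-byte eBPF instruction and spans two consecutive struct bpf_insn slots. A sketch of the encoding, modeled on the BPF_LD_IMM64_RAW macro in include/linux/filter.h (reproduced from memory, so treat the exact layout as illustrative; it is only usable inside a struct bpf_insn array initializer):

	#define BPF_LD_IMM64_RAW(DST, SRC, IMM)			\
		((struct bpf_insn) {				\
			.code  = BPF_LD | BPF_DW | BPF_IMM,	\
			.dst_reg = DST,				\
			.src_reg = SRC,				\
			.off   = 0,				\
			.imm   = (__u32) (IMM) }),		\
		((struct bpf_insn) {				\
			.code  = 0, /* zero is reserved opcode */ \
			.dst_reg = 0,				\
			.src_reg = 0,				\
			.off   = 0,				\
			.imm   = ((__u64) (IMM)) >> 32 })

	/* Example: loading 0x1122334455667788 splits into
	 *   insn[0].imm = 0x55667788  (low  32 bits)
	 *   insn[1].imm = 0x11223344  (high 32 bits)
	 * which is exactly what the JITs above reassemble:
	 *   imm64 = (u64)insn[1].imm << 32 | (u32)insn[0].imm;
	 */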

--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -394,27 +394,23 @@ static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 {
-	unsigned long flags;
-
 	if (!bpf_prog_kallsyms_candidate(fp) ||
 	    !capable(CAP_SYS_ADMIN))
 		return;
 
-	spin_lock_irqsave(&bpf_lock, flags);
+	spin_lock_bh(&bpf_lock);
 	bpf_prog_ksym_node_add(fp->aux);
-	spin_unlock_irqrestore(&bpf_lock, flags);
+	spin_unlock_bh(&bpf_lock);
 }
 
 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 {
-	unsigned long flags;
-
 	if (!bpf_prog_kallsyms_candidate(fp))
 		return;
 
-	spin_lock_irqsave(&bpf_lock, flags);
+	spin_lock_bh(&bpf_lock);
 	bpf_prog_ksym_node_del(fp->aux);
-	spin_unlock_irqrestore(&bpf_lock, flags);
+	spin_unlock_bh(&bpf_lock);
 }
 
 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
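
The _irqsave variant was stronger than needed: the reasoning behind the relaxation is that bpf_lock is only taken from process context (and at most softirq), never from hard-IRQ context, so disabling bottom halves on the local CPU is enough to avoid deadlock and the on-stack flags variable goes away. A minimal sketch of the pattern under that assumption (example_lock stands in for bpf_lock):

	static DEFINE_SPINLOCK(example_lock);

	static void example_update(void)
	{
		/* Disables softirqs locally but leaves hard IRQs on;
		 * cheaper than spin_lock_irqsave() when no hard-IRQ
		 * path can ever contend for the lock.
		 */
		spin_lock_bh(&example_lock);
		/* ... modify the structure shared with softirq context ... */
		spin_unlock_bh(&example_lock);
	}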

--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -6,6 +6,25 @@
 #include <string.h>
 #include <errno.h>
 
+#include <asm/byteorder.h>
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define __bpf_ntohs(x)		__builtin_bswap16(x)
+# define __bpf_htons(x)		__builtin_bswap16(x)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define __bpf_ntohs(x)		(x)
+# define __bpf_htons(x)		(x)
+#else
+# error "Fix your __BYTE_ORDER?!"
+#endif
+
+#define bpf_htons(x)				\
+	(__builtin_constant_p(x) ?		\
+	 __constant_htons(x) : __bpf_htons(x))
+#define bpf_ntohs(x)				\
+	(__builtin_constant_p(x) ?		\
+	 __constant_ntohs(x) : __bpf_ntohs(x))
+
 static inline unsigned int bpf_num_possible_cpus(void)
 {
 	static const char *fcpu = "/sys/devices/system/cpu/possible";
@@ -35,4 +54,11 @@ static inline unsigned int bpf_num_possible_cpus(void)
 	return possible_cpus;
 }
 
+#define __bpf_percpu_val_align	__attribute__((__aligned__(8)))
+
+#define BPF_DECLARE_PERCPU(type, name)				\
+	struct { type v; /* padding */ } __bpf_percpu_val_align	\
+		name[bpf_num_possible_cpus()]
+#define bpf_percpu(name, cpu)	name[(cpu)].v
+
 #endif /* __BPF_UTIL__ */
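
The __builtin_constant_p() split matters because __constant_htons() folds at compile time and can therefore appear in static initializers (exactly how test_progs.c uses it further down), while the __bpf_htons() path byte-swaps runtime values on little-endian hosts. A small usage sketch, assuming the usual <linux/types.h> and <linux/if_ether.h> definitions:

	#include <linux/types.h>
	#include <linux/if_ether.h>	/* ETH_P_IP */
	#include "bpf_util.h"

	/* Constant argument: folded at compile time, so this is a
	 * valid static initializer.
	 */
	static const __u16 proto_ip = bpf_htons(ETH_P_IP);

	/* Runtime argument: byte swap on little endian, no-op on
	 * big endian.
	 */
	static __u16 to_wire(__u16 host_len)
	{
		return bpf_htons(host_len);
	}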

--- a/tools/testing/selftests/bpf/test_l4lb.c
+++ b/tools/testing/selftests/bpf/test_l4lb.c
@@ -19,9 +19,8 @@
 #include <linux/udp.h>
 #include "bpf_helpers.h"
 #include "test_iptunnel_common.h"
+#include "bpf_util.h"
 
-#define htons __builtin_bswap16
-#define ntohs __builtin_bswap16
 int _version SEC("version") = 1;
 
 static inline __u32 rol32(__u32 word, unsigned int shift)
@@ -355,7 +354,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
 		iph_len = sizeof(struct ipv6hdr);
 		protocol = ip6h->nexthdr;
 		pckt.proto = protocol;
-		pkt_bytes = ntohs(ip6h->payload_len);
+		pkt_bytes = bpf_ntohs(ip6h->payload_len);
 		off += iph_len;
 		if (protocol == IPPROTO_FRAGMENT) {
 			return TC_ACT_SHOT;
@@ -377,7 +376,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
 		protocol = iph->protocol;
 		pckt.proto = protocol;
-		pkt_bytes = ntohs(iph->tot_len);
+		pkt_bytes = bpf_ntohs(iph->tot_len);
 		off += IPV4_HDR_LEN_NO_OPT;
 
 		if (iph->frag_off & PCKT_FRAGMENTED)
@@ -464,9 +463,9 @@ int balancer_ingress(struct __sk_buff *ctx)
 	if (data + nh_off > data_end)
 		return TC_ACT_SHOT;
 	eth_proto = eth->eth_proto;
-	if (eth_proto == htons(ETH_P_IP))
+	if (eth_proto == bpf_htons(ETH_P_IP))
 		return process_packet(data, nh_off, data_end, false, ctx);
-	else if (eth_proto == htons(ETH_P_IPV6))
+	else if (eth_proto == bpf_htons(ETH_P_IPV6))
 		return process_packet(data, nh_off, data_end, true, ctx);
 	else
 		return TC_ACT_SHOT;

--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -137,20 +137,20 @@ static void test_hashmap_sizes(int task, void *data)
 static void test_hashmap_percpu(int task, void *data)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
-	long long value[nr_cpus];
+	BPF_DECLARE_PERCPU(long, value);
 	long long key, next_key, first_key;
 	int expected_key_mask = 0;
 	int fd, i;
 
 	fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
-			    sizeof(value[0]), 2, map_flags);
+			    sizeof(bpf_percpu(value, 0)), 2, map_flags);
 	if (fd < 0) {
 		printf("Failed to create hashmap '%s'!\n", strerror(errno));
 		exit(1);
 	}
 
 	for (i = 0; i < nr_cpus; i++)
-		value[i] = i + 100;
+		bpf_percpu(value, i) = i + 100;
 
 	key = 1;
 	/* Insert key=1 element. */
@@ -170,8 +170,9 @@ static void test_hashmap_percpu(int task, void *data)
 	/* Check that key=1 can be found. Value could be 0 if the lookup
 	 * was run from a different CPU.
 	 */
-	value[0] = 1;
-	assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100);
+	bpf_percpu(value, 0) = 1;
+	assert(bpf_map_lookup_elem(fd, &key, value) == 0 &&
+	       bpf_percpu(value, 0) == 100);
 
 	key = 2;
 	/* Check that key=2 is not found. */
@@ -211,7 +212,7 @@ static void test_hashmap_percpu(int task, void *data)
 		assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
 
 		for (i = 0; i < nr_cpus; i++)
-			assert(value[i] == i + 100);
+			assert(bpf_percpu(value, i) == i + 100);
 
 		key = next_key;
 	}
@@ -296,34 +297,36 @@ static void test_arraymap(int task, void *data)
 static void test_arraymap_percpu(int task, void *data)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
+	BPF_DECLARE_PERCPU(long, values);
 	int key, next_key, fd, i;
-	long long values[nr_cpus];
 
 	fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
-			    sizeof(values[0]), 2, 0);
+			    sizeof(bpf_percpu(values, 0)), 2, 0);
 	if (fd < 0) {
 		printf("Failed to create arraymap '%s'!\n", strerror(errno));
 		exit(1);
 	}
 
 	for (i = 0; i < nr_cpus; i++)
-		values[i] = i + 100;
+		bpf_percpu(values, i) = i + 100;
 
 	key = 1;
 	/* Insert key=1 element. */
 	assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
 
-	values[0] = 0;
+	bpf_percpu(values, 0) = 0;
 	assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 &&
 	       errno == EEXIST);
 
 	/* Check that key=1 can be found. */
-	assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100);
+	assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
+	       bpf_percpu(values, 0) == 100);
 
 	key = 0;
 	/* Check that key=0 is also found and zero initialized. */
 	assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
-	       values[0] == 0 && values[nr_cpus - 1] == 0);
+	       bpf_percpu(values, 0) == 0 &&
+	       bpf_percpu(values, nr_cpus - 1) == 0);
 
 	/* Check that key=2 cannot be inserted due to max_entries limit. */
 	key = 2;
@@ -353,15 +356,15 @@ static void test_arraymap_percpu(int task, void *data)
 static void test_arraymap_percpu_many_keys(void)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
+	BPF_DECLARE_PERCPU(long, values);
 	/* nr_keys is not too large otherwise the test stresses percpu
 	 * allocator more than anything else
 	 */
 	unsigned int nr_keys = 2000;
-	long long values[nr_cpus];
 	int key, fd, i;
 
 	fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
-			    sizeof(values[0]), nr_keys, 0);
+			    sizeof(bpf_percpu(values, 0)), nr_keys, 0);
 	if (fd < 0) {
 		printf("Failed to create per-cpu arraymap '%s'!\n",
 		       strerror(errno));
@@ -369,19 +372,19 @@ static void test_arraymap_percpu_many_keys(void)
 	}
 
 	for (i = 0; i < nr_cpus; i++)
-		values[i] = i + 10;
+		bpf_percpu(values, i) = i + 10;
 
 	for (key = 0; key < nr_keys; key++)
 		assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
 
 	for (key = 0; key < nr_keys; key++) {
 		for (i = 0; i < nr_cpus; i++)
-			values[i] = 0;
+			bpf_percpu(values, i) = 0;
 
 		assert(bpf_map_lookup_elem(fd, &key, values) == 0);
 
 		for (i = 0; i < nr_cpus; i++)
-			assert(values[i] == i + 10);
+			assert(bpf_percpu(values, i) == i + 10);
 	}
 
 	close(fd);
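
The accessor macros are not mere cosmetics: the kernel copies per-CPU map values to and from user space at a fixed stride of round_up(value_size, 8) per CPU, so a bare long value[nr_cpus] would be mis-sized wherever sizeof(long) != 8, e.g. on 32-bit user space. BPF_DECLARE_PERCPU() pads and aligns each element to 8 bytes instead. A sketch of the idiom, assuming bpf_util.h from above:

	#include "bpf_util.h"

	static void zero_percpu_value(void)
	{
		/* Conceptually: struct { long v; } __aligned(8)
		 * value[nr_cpus], i.e. one 8-byte slot per possible CPU.
		 */
		BPF_DECLARE_PERCPU(long, value);
		unsigned int cpu, nr_cpus = bpf_num_possible_cpus();

		for (cpu = 0; cpu < nr_cpus; cpu++)
			bpf_percpu(value, cpu) = 0;

		/* value is then passed as-is as the value pointer to
		 * bpf_map_update_elem()/bpf_map_lookup_elem(), as in
		 * the tests above.
		 */
	}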

--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_access.c
@@ -14,8 +14,8 @@
 #include <linux/tcp.h>
 #include <linux/pkt_cls.h>
 #include "bpf_helpers.h"
+#include "bpf_util.h"
 
-#define _htons __builtin_bswap16
 #define barrier() __asm__ __volatile__("": : :"memory")
 int _version SEC("version") = 1;
 
@@ -32,7 +32,7 @@ int process(struct __sk_buff *skb)
 	if (eth + 1 > data_end)
 		return TC_ACT_SHOT;
 
-	if (eth->h_proto == _htons(ETH_P_IP)) {
+	if (eth->h_proto == bpf_htons(ETH_P_IP)) {
 		struct iphdr *iph = (struct iphdr *)(eth + 1);
 
 		if (iph + 1 > data_end)
@@ -40,7 +40,7 @@ int process(struct __sk_buff *skb)
 		ihl_len = iph->ihl * 4;
 		proto = iph->protocol;
 		tcp = (struct tcphdr *)((void *)(iph) + ihl_len);
-	} else if (eth->h_proto == _htons(ETH_P_IPV6)) {
+	} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
 		struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1);
 
 		if (ip6h + 1 > data_end)

--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -30,8 +30,6 @@ typedef __u16 __sum16;
 #include "test_iptunnel_common.h"
 #include "bpf_util.h"
 
-#define _htons __builtin_bswap16
-
 static int error_cnt, pass_cnt;
 
 #define MAGIC_BYTES 123
@@ -42,10 +40,10 @@ static struct {
 	struct iphdr iph;
 	struct tcphdr tcp;
 } __packed pkt_v4 = {
-	.eth.h_proto = _htons(ETH_P_IP),
+	.eth.h_proto = bpf_htons(ETH_P_IP),
 	.iph.ihl = 5,
 	.iph.protocol = 6,
-	.iph.tot_len = _htons(MAGIC_BYTES),
+	.iph.tot_len = bpf_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
 };
 
@@ -55,9 +53,9 @@ static struct {
 	struct ipv6hdr iph;
 	struct tcphdr tcp;
 } __packed pkt_v6 = {
-	.eth.h_proto = _htons(ETH_P_IPV6),
+	.eth.h_proto = bpf_htons(ETH_P_IPV6),
 	.iph.nexthdr = 6,
-	.iph.payload_len = _htons(MAGIC_BYTES),
+	.iph.payload_len = bpf_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
 };

--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -190,6 +190,86 @@ static struct bpf_test tests[] = {
 		.errstr = "invalid bpf_ld_imm64 insn",
 		.result = REJECT,
 	},
+	{
+		"test6 ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+			BPF_RAW_INSN(0, 0, 0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"test7 ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+			BPF_RAW_INSN(0, 0, 0, 0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"test8 ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
+			BPF_RAW_INSN(0, 0, 0, 0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "uses reserved fields",
+		.result = REJECT,
+	},
+	{
+		"test9 ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+			BPF_RAW_INSN(0, 0, 0, 1, 1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_ld_imm64 insn",
+		.result = REJECT,
+	},
+	{
+		"test10 ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_ld_imm64 insn",
+		.result = REJECT,
+	},
+	{
+		"test11 ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_ld_imm64 insn",
+		.result = REJECT,
+	},
+	{
+		"test12 ld_imm64",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+			BPF_RAW_INSN(0, 0, 0, 0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "not pointing to valid bpf_map",
+		.result = REJECT,
+	},
+	{
+		"test13 ld_imm64",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_ld_imm64 insn",
+		.result = REJECT,
+	},
 	{
 		"no bpf_exit",
 		.insns = {
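
For reading test6 through test13: BPF_RAW_INSN() builds a struct bpf_insn field by field, which is what lets these tests construct a deliberately malformed second slot of an ld_imm64 pair. A sketch of the macro as it appears in the shared BPF headers (reproduced from memory; argument order CODE, DST, SRC, OFF, IMM):

	#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)		\
		((struct bpf_insn) {				\
			.code  = CODE,				\
			.dst_reg = DST,				\
			.src_reg = SRC,				\
			.off   = OFF,				\
			.imm   = IMM })

test6 and test7 are the two well-formed variants (zero and nonzero upper half) and must be accepted; test8 poisons off in the first slot ("uses reserved fields"), while the remaining cases poison off, src_reg, or dst_reg in the second slot. test12 and test13 additionally exercise the BPF_PSEUDO_MAP_FD path, since src_reg = 1 in the first slot declares the immediate to be a map fd.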
@@ -330,6 +410,30 @@ static struct bpf_test tests[] = {
 		.errstr = "invalid read from stack",
 		.result = REJECT,
 	},
+	{
+		"invalid fp arithmetic",
+		/* If this gets ever changed, make sure JITs can deal with it. */
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 pointer arithmetic",
+		.result_unpriv = REJECT,
+		.errstr = "R1 invalid mem access",
+		.result = REJECT,
+	},
+	{
+		"non-invalid fp arithmetic",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 	{
 		"invalid argument register",
 		.insns = {
@@ -1800,6 +1904,20 @@ static struct bpf_test tests[] = {
 		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
+	{
+		"unpriv: adding of fp",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "pointer arithmetic prohibited",
+		.result_unpriv = REJECT,
+		.errstr = "R1 invalid mem access",
+		.result = REJECT,
+	},
 	{
 		"unpriv: cmp of stack pointer",
 		.insns = {
@@ -2471,6 +2589,25 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"direct packet access: test16 (arith on data_end)",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid access to packet",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
 	{
 		"helper access to packet: test1, valid packet_ptr range",
 		.insns = {
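
test16 pins down why arithmetic on data_end has to be rejected: if the verifier allowed data_end += 16, the subsequent bounds check would prove nothing and the store could land past the real end of the packet. For contrast, a sketch of the accepted pattern, written in the same restricted C style as the selftest programs above (assuming the usual <linux/bpf.h>, <linux/if_ether.h>, <linux/pkt_cls.h>, bpf_helpers.h, and bpf_util.h includes):

	SEC("classifier")
	int parse_eth(struct __sk_buff *skb)
	{
		void *data = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;
		struct ethhdr *eth = data;

		/* Compare against the pristine data_end; only then does
		 * the verifier know [data, data + sizeof(*eth)) lies
		 * within the packet.
		 */
		if (eth + 1 > data_end)
			return TC_ACT_SHOT;

		return eth->h_proto == bpf_htons(ETH_P_IP) ?
		       TC_ACT_OK : TC_ACT_SHOT;
	}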