Commit f89271f0 authored by Jakub Kicinski

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2021-04-28

The main changes are:

1) Add link detach and subsequent re-attach for trampolines, from Jiri Olsa.

2) Use kernel's "binary printf" lib for formatted output BPF helpers (which
   avoids the need for variadic argument handling), from Florent Revest.

3) Fix verifier 64 to 32 bit min/max bound propagation, from Daniel Borkmann.

4) Convert cpumap to use netif_receive_skb_list(), from Lorenzo Bianconi.

5) Add generic batched-ops support to percpu array map, from Pedro Tammela.

6) Various CO-RE relocation BPF selftests fixes, from Andrii Nakryiko.

7) Misc doc rst fixes, from Hengqi Chen.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next:
  bpf, selftests: Update array map tests for per-cpu batched ops
  bpf: Add batched ops support for percpu array
  bpf: Implement formatted output helpers with bstr_printf
  seq_file: Add a seq_bprintf function
  bpf, docs: Fix literal block for example code
  bpf, cpumap: Bulk skb using netif_receive_skb_list
  bpf: Fix propagation of 32 bit unsigned bounds from 64 bit bounds
  bpf: Lock bpf_trace_printk's tmp buf before it is written to
  selftests/bpf: Fix core_reloc test runner
  selftests/bpf: Fix field existence CO-RE reloc tests
  selftests/bpf: Fix BPF_CORE_READ_BITFIELD() macro
  libbpf: Support BTF_KIND_FLOAT during type compatibility checks in CO-RE
  selftests/bpf: Add remaining ASSERT_xxx() variants
  selftests/bpf: Use ASSERT macros in lsm test
  selftests/bpf: Test that module can't be unloaded with attached trampoline
  selftests/bpf: Add re-attach test to lsm test
  selftests/bpf: Add re-attach test to fexit_test
  selftests/bpf: Add re-attach test to fentry_test
  bpf: Allow trampoline re-attach for tracing and lsm programs
====================

Link: https://lore.kernel.org/r/20210427233740.22238-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 99ba0ea6 3733bfbb
......@@ -327,7 +327,7 @@ Examples for low-level BPF:
ret #-1
drop: ret #0
**icmp random packet sampling, 1 in 4**:
**icmp random packet sampling, 1 in 4**::
ldh [12]
jne #0x800, drop
......
......@@ -412,6 +412,24 @@ void seq_printf(struct seq_file *m, const char *f, ...)
}
EXPORT_SYMBOL(seq_printf);
#ifdef CONFIG_BINARY_PRINTF
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary)
{
int len;
if (m->count < m->size) {
len = bstr_printf(m->buf + m->count, m->size - m->count, f,
binary);
if (m->count + len < m->size) {
m->count += len;
return;
}
}
seq_set_overflow(m);
}
EXPORT_SYMBOL(seq_bprintf);
#endif /* CONFIG_BINARY_PRINTF */
/**
* mangle_path - mangle and copy path to buffer beginning
* @s: buffer start
......
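For context, a rough sketch of how the new seq_bprintf() can be exercised: the binary argument buffer is produced by the existing bprintf() from lib/vsprintf.c, while the BPF path instead builds it via bpf_bprintf_prepare() further below. The function name and values are illustrative only:

#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/string.h>	/* bprintf()/bstr_printf(), CONFIG_BINARY_PRINTF */

/* Pack the arguments into a u32 word buffer, then let seq_bprintf()
 * expand them into the seq_file using the same format string.
 */
static void example_seq_show(struct seq_file *m, int cpu, u64 hits)
{
	u32 bin_buf[64];	/* bprintf() size argument is in 32-bit words */

	bprintf(bin_buf, ARRAY_SIZE(bin_buf), "cpu %d: %llu hits\n", cpu, hits);
	seq_bprintf(m, "cpu %d: %llu hits\n", bin_buf);
}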
......@@ -2081,24 +2081,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
enum bpf_printf_mod_type {
BPF_PRINTF_INT,
BPF_PRINTF_LONG,
BPF_PRINTF_LONG_LONG,
};
/* Workaround for getting va_list handling working with different argument type
* combinations generically for 32 and 64 bit archs.
*/
#define BPF_CAST_FMT_ARG(arg_nb, args, mod) \
(mod[arg_nb] == BPF_PRINTF_LONG_LONG || \
(mod[arg_nb] == BPF_PRINTF_LONG && __BITS_PER_LONG == 64) \
? (u64)args[arg_nb] \
: (u32)args[arg_nb])
int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u64 *final_args, enum bpf_printf_mod_type *mod,
u32 num_args);
void bpf_printf_cleanup(void);
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
#endif /* _LINUX_BPF_H */
......@@ -146,6 +146,10 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
#define DEFINE_SEQ_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
......
......@@ -1708,6 +1708,7 @@ config BPF_SYSCALL
select BPF
select IRQ_WORK
select TASKS_TRACE_RCU
select BINARY_PRINTF
select NET_SOCK_MSG if INET
default n
help
......
......@@ -698,6 +698,8 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_delete_elem = array_map_delete_elem,
.map_seq_show_elem = percpu_array_map_seq_show_elem,
.map_check_btf = array_map_check_btf,
.map_lookup_batch = generic_map_lookup_batch,
.map_update_batch = generic_map_update_batch,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_array_elem,
.map_btf_name = "bpf_array",
......
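With these two callbacks wired up, per-cpu array maps can be walked through the existing generic batch API from user space. A minimal libbpf sketch, assuming a map fd and entry count; as in the selftest further down, the per-CPU values of one element are laid out contiguously:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int dump_percpu_array(int map_fd, __u32 max_entries)
{
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);
	int nr_cpus = libbpf_num_possible_cpus();
	int *keys = calloc(max_entries, sizeof(int));
	/* one __s64 slot per possible CPU for every element */
	__s64 *values = calloc(max_entries, sizeof(__s64) * nr_cpus);
	__u32 count = max_entries;
	__u64 out_batch;
	int err;

	err = bpf_map_lookup_batch(map_fd, NULL /* start from the beginning */,
				   &out_batch, keys, values, &count, &opts);
	/* a failure with errno == ENOENT only means the iteration hit the end */
	if (err && errno != ENOENT)
		fprintf(stderr, "lookup_batch failed: %d\n", -errno);

	free(values);
	free(keys);
	return err;
}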
......@@ -27,7 +27,7 @@
#include <linux/capability.h>
#include <trace/events/xdp.h>
#include <linux/netdevice.h> /* netif_receive_skb_core */
#include <linux/netdevice.h> /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */
/* General idea: XDP packets getting XDP redirected to another CPU,
......@@ -252,11 +252,12 @@ static int cpu_map_kthread_run(void *data)
*/
while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
struct xdp_cpumap_stats stats = {}; /* zero stats */
unsigned int kmem_alloc_drops = 0, sched = 0;
gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
unsigned int drops = 0, sched = 0;
void *frames[CPUMAP_BATCH];
void *skbs[CPUMAP_BATCH];
int i, n, m, nframes;
LIST_HEAD(list);
/* Release CPU reschedule checks */
if (__ptr_ring_empty(rcpu->queue)) {
......@@ -297,7 +298,7 @@ static int cpu_map_kthread_run(void *data)
if (unlikely(m == 0)) {
for (i = 0; i < nframes; i++)
skbs[i] = NULL; /* effect: xdp_return_frame */
drops += nframes;
kmem_alloc_drops += nframes;
}
}
......@@ -305,7 +306,6 @@ static int cpu_map_kthread_run(void *data)
for (i = 0; i < nframes; i++) {
struct xdp_frame *xdpf = frames[i];
struct sk_buff *skb = skbs[i];
int ret;
skb = __xdp_build_skb_from_frame(xdpf, skb,
xdpf->dev_rx);
......@@ -314,13 +314,13 @@ static int cpu_map_kthread_run(void *data)
continue;
}
/* Inject into network stack */
ret = netif_receive_skb_core(skb);
if (ret == NET_RX_DROP)
drops++;
list_add_tail(&skb->list, &list);
}
netif_receive_skb_list(&list);
/* Feedback loop via tracepoint */
trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);
trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
sched, &stats);
local_bh_enable(); /* resched point, may call do_softirq() */
}
......
......@@ -707,9 +707,6 @@ static int try_get_fmt_tmp_buf(char **tmp_buf)
struct bpf_printf_buf *bufs;
int used;
if (*tmp_buf)
return 0;
preempt_disable();
used = this_cpu_inc_return(bpf_printf_buf_used);
if (WARN_ON_ONCE(used > 1)) {
......@@ -723,7 +720,7 @@ static int try_get_fmt_tmp_buf(char **tmp_buf)
return 0;
}
void bpf_printf_cleanup(void)
void bpf_bprintf_cleanup(void)
{
if (this_cpu_read(bpf_printf_buf_used)) {
this_cpu_dec(bpf_printf_buf_used);
......@@ -732,43 +729,45 @@ void bpf_printf_cleanup(void)
}
/*
* bpf_parse_fmt_str - Generic pass on format strings for printf-like helpers
* bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
*
* Returns a negative value if fmt is an invalid format string or 0 otherwise.
*
* This can be used in two ways:
* - Format string verification only: when final_args and mod are NULL
* - Format string verification only: when bin_args is NULL
* - Arguments preparation: in addition to the above verification, it writes in
* final_args a copy of raw_args where pointers from BPF have been sanitized
* into pointers safe to use by snprintf. This also writes in the mod array
* the size requirement of each argument, usable by BPF_CAST_FMT_ARG for ex.
* bin_args a binary representation of arguments usable by bstr_printf where
* pointers from BPF have been sanitized.
*
* In argument preparation mode, if 0 is returned, safe temporary buffers are
* allocated and bpf_printf_cleanup should be called to free them after use.
* allocated and bpf_bprintf_cleanup should be called to free them after use.
*/
int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u64 *final_args, enum bpf_printf_mod_type *mod,
u32 num_args)
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_args, u32 num_args)
{
char *unsafe_ptr = NULL, *tmp_buf = NULL, *fmt_end;
size_t tmp_buf_len = MAX_PRINTF_BUF_LEN;
int err, i, num_spec = 0, copy_size;
enum bpf_printf_mod_type cur_mod;
char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
size_t sizeof_cur_arg, sizeof_cur_ip;
int err, i, num_spec = 0;
u64 cur_arg;
char fmt_ptype;
if (!!final_args != !!mod)
return -EINVAL;
char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
fmt_end = strnchr(fmt, fmt_size, 0);
if (!fmt_end)
return -EINVAL;
fmt_size = fmt_end - fmt;
if (bin_args) {
if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
return -EBUSY;
tmp_buf_end = tmp_buf + MAX_PRINTF_BUF_LEN;
*bin_args = (u32 *)tmp_buf;
}
for (i = 0; i < fmt_size; i++) {
if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
err = -EINVAL;
goto cleanup;
goto out;
}
if (fmt[i] != '%')
......@@ -781,7 +780,7 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
if (num_spec >= num_args) {
err = -EINVAL;
goto cleanup;
goto out;
}
/* The string is zero-terminated so if fmt[i] != 0, we can
......@@ -800,7 +799,7 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
}
if (fmt[i] == 'p') {
cur_mod = BPF_PRINTF_LONG;
sizeof_cur_arg = sizeof(long);
if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
fmt[i + 2] == 's') {
......@@ -811,117 +810,140 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
fmt[i + 1] == 'x' || fmt[i + 1] == 'B' ||
fmt[i + 1] == 's' || fmt[i + 1] == 'S') {
fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
fmt[i + 1] == 'S') {
/* just kernel pointers */
if (final_args)
if (tmp_buf)
cur_arg = raw_args[num_spec];
goto fmt_next;
i++;
goto nocopy_fmt;
}
if (fmt[i + 1] == 'B') {
if (tmp_buf) {
err = snprintf(tmp_buf,
(tmp_buf_end - tmp_buf),
"%pB",
(void *)(long)raw_args[num_spec]);
tmp_buf += (err + 1);
}
i++;
num_spec++;
continue;
}
/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
(fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
err = -EINVAL;
goto cleanup;
goto out;
}
i += 2;
if (!final_args)
goto fmt_next;
if (!tmp_buf)
goto nocopy_fmt;
if (try_get_fmt_tmp_buf(&tmp_buf)) {
err = -EBUSY;
goto out;
}
copy_size = (fmt[i + 2] == '4') ? 4 : 16;
if (tmp_buf_len < copy_size) {
sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
err = -ENOSPC;
goto cleanup;
goto out;
}
unsafe_ptr = (char *)(long)raw_args[num_spec];
err = copy_from_kernel_nofault(tmp_buf, unsafe_ptr,
copy_size);
err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
sizeof_cur_ip);
if (err < 0)
memset(tmp_buf, 0, copy_size);
cur_arg = (u64)(long)tmp_buf;
tmp_buf += copy_size;
tmp_buf_len -= copy_size;
memset(cur_ip, 0, sizeof_cur_ip);
goto fmt_next;
/* hack: bstr_printf expects IP addresses to be
* pre-formatted as strings, ironically, the easiest way
* to do that is to call snprintf.
*/
ip_spec[2] = fmt[i - 1];
ip_spec[3] = fmt[i];
err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
ip_spec, &cur_ip);
tmp_buf += err + 1;
num_spec++;
continue;
} else if (fmt[i] == 's') {
cur_mod = BPF_PRINTF_LONG;
fmt_ptype = fmt[i];
fmt_str:
if (fmt[i + 1] != 0 &&
!isspace(fmt[i + 1]) &&
!ispunct(fmt[i + 1])) {
err = -EINVAL;
goto cleanup;
}
if (!final_args)
goto fmt_next;
if (try_get_fmt_tmp_buf(&tmp_buf)) {
err = -EBUSY;
goto out;
}
if (!tmp_buf_len) {
if (!tmp_buf)
goto nocopy_fmt;
if (tmp_buf_end == tmp_buf) {
err = -ENOSPC;
goto cleanup;
goto out;
}
unsafe_ptr = (char *)(long)raw_args[num_spec];
err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
fmt_ptype, tmp_buf_len);
fmt_ptype,
tmp_buf_end - tmp_buf);
if (err < 0) {
tmp_buf[0] = '\0';
err = 1;
}
cur_arg = (u64)(long)tmp_buf;
tmp_buf += err;
tmp_buf_len -= err;
num_spec++;
goto fmt_next;
continue;
}
cur_mod = BPF_PRINTF_INT;
sizeof_cur_arg = sizeof(int);
if (fmt[i] == 'l') {
cur_mod = BPF_PRINTF_LONG;
sizeof_cur_arg = sizeof(long);
i++;
}
if (fmt[i] == 'l') {
cur_mod = BPF_PRINTF_LONG_LONG;
sizeof_cur_arg = sizeof(long long);
i++;
}
if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
fmt[i] != 'x' && fmt[i] != 'X') {
err = -EINVAL;
goto cleanup;
goto out;
}
if (final_args)
if (tmp_buf)
cur_arg = raw_args[num_spec];
fmt_next:
if (final_args) {
mod[num_spec] = cur_mod;
final_args[num_spec] = cur_arg;
nocopy_fmt:
if (tmp_buf) {
tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
err = -ENOSPC;
goto out;
}
if (sizeof_cur_arg == 8) {
*(u32 *)tmp_buf = *(u32 *)&cur_arg;
*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
} else {
*(u32 *)tmp_buf = (u32)(long)cur_arg;
}
tmp_buf += sizeof_cur_arg;
}
num_spec++;
}
err = 0;
cleanup:
if (err)
bpf_printf_cleanup();
out:
if (err)
bpf_bprintf_cleanup();
return err;
}
......@@ -930,9 +952,8 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
const void *, data, u32, data_len)
{
enum bpf_printf_mod_type mod[MAX_SNPRINTF_VARARGS];
u64 args[MAX_SNPRINTF_VARARGS];
int err, num_args;
u32 *bin_args;
if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
(data_len && !data))
......@@ -942,22 +963,13 @@ BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
* can safely give an unbounded size.
*/
err = bpf_printf_prepare(fmt, UINT_MAX, data, args, mod, num_args);
err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
if (err < 0)
return err;
/* Maximumly we can have MAX_SNPRINTF_VARARGS parameters, just give
* all of them to snprintf().
*/
err = snprintf(str, str_size, fmt, BPF_CAST_FMT_ARG(0, args, mod),
BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod),
BPF_CAST_FMT_ARG(3, args, mod), BPF_CAST_FMT_ARG(4, args, mod),
BPF_CAST_FMT_ARG(5, args, mod), BPF_CAST_FMT_ARG(6, args, mod),
BPF_CAST_FMT_ARG(7, args, mod), BPF_CAST_FMT_ARG(8, args, mod),
BPF_CAST_FMT_ARG(9, args, mod), BPF_CAST_FMT_ARG(10, args, mod),
BPF_CAST_FMT_ARG(11, args, mod));
bpf_printf_cleanup();
err = bstr_printf(str, str_size, fmt, bin_args);
bpf_bprintf_cleanup();
return err + 1;
}
......
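The BPF-side contract of the bpf_snprintf() helper is unchanged by the switch to bstr_printf(): variadic arguments still travel as a packed u64 array and the format string must be a read-only constant. A hedged BPF program sketch (the pid/tgid values are placeholders):

	char out[64];
	/* every argument is passed as a u64; data_len is the array size in bytes */
	__u64 args[] = { pid, tgid };
	/* static const lands in .rodata, satisfying ARG_PTR_TO_CONST_STR */
	static const char fmt[] = "pid %d tgid %d";

	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));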
......@@ -2650,13 +2650,24 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
* - if tgt_prog == NULL when this function was called using the old
* raw_tracepoint_open API, and we need a target from prog->aux
*
* The combination of no saved target in prog->aux, and no target
* specified on load is illegal, and we reject that here.
* - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
* was detached and is going for re-attachment.
*/
if (!prog->aux->dst_trampoline && !tgt_prog) {
err = -ENOENT;
/*
* Allow re-attach for TRACING and LSM programs. If it's
* currently linked, bpf_trampoline_link_prog will fail.
* EXT programs need to specify tgt_prog_fd, so they
* re-attach in separate code path.
*/
if (prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM) {
err = -EINVAL;
goto out_unlock;
}
btf_id = prog->aux->attach_btf_id;
key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
}
if (!prog->aux->dst_trampoline ||
(key && key != prog->aux->dst_trampoline->key)) {
......
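Seen from user space, the new behavior allows the following libbpf sequence, sketched here with a hypothetical skeleton; it mirrors what the re-attach selftests below do. While a program is still linked, a second attach keeps failing; once the link is destroyed, the same TRACING or LSM program can be attached again:

	struct bpf_link *link;

	link = bpf_program__attach(skel->progs.test1);
	if (libbpf_get_error(link))
		return -1;

	bpf_link__destroy(link);			/* detach */

	link = bpf_program__attach(skel->progs.test1);	/* re-attach now succeeds */
	if (libbpf_get_error(link))
		return -1;
	bpf_link__destroy(link);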
......@@ -444,7 +444,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
tr->progs_cnt[kind]++;
err = bpf_trampoline_update(tr);
if (err) {
hlist_del(&prog->aux->tramp_hlist);
hlist_del_init(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
}
out:
......@@ -467,7 +467,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
tr->extension_prog = NULL;
goto out;
}
hlist_del(&prog->aux->tramp_hlist);
hlist_del_init(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
err = bpf_trampoline_update(tr);
out:
......
......@@ -1398,9 +1398,7 @@ static bool __reg64_bound_s32(s64 a)
static bool __reg64_bound_u32(u64 a)
{
if (a > U32_MIN && a < U32_MAX)
return true;
return false;
return a > U32_MIN && a < U32_MAX;
}
static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
......@@ -1411,10 +1409,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
reg->s32_min_value = (s32)reg->smin_value;
reg->s32_max_value = (s32)reg->smax_value;
}
if (__reg64_bound_u32(reg->umin_value))
if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
reg->u32_min_value = (u32)reg->umin_value;
if (__reg64_bound_u32(reg->umax_value))
reg->u32_max_value = (u32)reg->umax_value;
}
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
......@@ -5948,7 +5946,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
* can focus on validating the format specifiers.
*/
err = bpf_printf_prepare(fmt, UINT_MAX, NULL, NULL, NULL, num_args);
err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
if (err < 0)
verbose(env, "Invalid format string\n");
......
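A concrete illustration of the bug fixed above: if a register's 64-bit unsigned bounds are [1, U32_MAX + 2], the old code copied umin into u32_min_value (1 fits in 32 bits) but could not tighten u32_max_value, even though the range still contains 2^32, whose low 32 bits are 0 and therefore below that new 32-bit minimum. With the fix, the 32-bit bounds are only derived when both umin_value and umax_value fit in 32 bits.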
......@@ -381,27 +381,23 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
u64, arg2, u64, arg3)
{
u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
enum bpf_printf_mod_type mod[MAX_TRACE_PRINTK_VARARGS];
u32 *bin_args;
static char buf[BPF_TRACE_PRINTK_SIZE];
unsigned long flags;
int ret;
ret = bpf_printf_prepare(fmt, fmt_size, args, args, mod,
ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
MAX_TRACE_PRINTK_VARARGS);
if (ret < 0)
return ret;
ret = snprintf(buf, sizeof(buf), fmt, BPF_CAST_FMT_ARG(0, args, mod),
BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod));
/* snprintf() will not append null for zero-length strings */
if (ret == 0)
buf[0] = '\0';
raw_spin_lock_irqsave(&trace_printk_lock, flags);
ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
trace_bpf_trace_printk(buf);
raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
bpf_printf_cleanup();
bpf_bprintf_cleanup();
return ret;
}
......@@ -435,31 +431,21 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
const void *, data, u32, data_len)
{
enum bpf_printf_mod_type mod[MAX_SEQ_PRINTF_VARARGS];
u64 args[MAX_SEQ_PRINTF_VARARGS];
int err, num_args;
u32 *bin_args;
if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
(data_len && !data))
return -EINVAL;
num_args = data_len / 8;
err = bpf_printf_prepare(fmt, fmt_size, data, args, mod, num_args);
err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
if (err < 0)
return err;
/* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give
* all of them to seq_printf().
*/
seq_printf(m, fmt, BPF_CAST_FMT_ARG(0, args, mod),
BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod),
BPF_CAST_FMT_ARG(3, args, mod), BPF_CAST_FMT_ARG(4, args, mod),
BPF_CAST_FMT_ARG(5, args, mod), BPF_CAST_FMT_ARG(6, args, mod),
BPF_CAST_FMT_ARG(7, args, mod), BPF_CAST_FMT_ARG(8, args, mod),
BPF_CAST_FMT_ARG(9, args, mod), BPF_CAST_FMT_ARG(10, args, mod),
BPF_CAST_FMT_ARG(11, args, mod));
bpf_printf_cleanup();
seq_bprintf(m, fmt, bin_args);
bpf_bprintf_cleanup();
return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}
......
......@@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
unsigned long long val; \
\
/* This is a so-called barrier_var() operation that makes specified \
* variable "a black box" for optimizing compiler. \
* It forces compiler to perform BYTE_OFFSET relocation on p and use \
* its calculated value in the switch below, instead of applying \
* the same relocation 4 times for each individual memory load. \
*/ \
asm volatile("" : "=r"(p) : "0"(p)); \
\
switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
case 1: val = *(const unsigned char *)p; \
case 2: val = *(const unsigned short *)p; \
case 4: val = *(const unsigned int *)p; \
case 8: val = *(const unsigned long long *)p; \
case 1: val = *(const unsigned char *)p; break; \
case 2: val = *(const unsigned short *)p; break; \
case 4: val = *(const unsigned int *)p; break; \
case 8: val = *(const unsigned long long *)p; break; \
} \
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
if (__CORE_RELO(s, field, SIGNED)) \
......
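For reference, a hedged sketch of how the fixed macro is used from a BPF program; the struct, field and pointer are invented, only BPF_CORE_READ_BITFIELD() itself comes from bpf_core_read.h:

	/* hypothetical CO-RE-relocatable struct with a bitfield */
	struct sk_state { unsigned int is_listening: 1; } *st = get_state();
	__u64 listening = BPF_CORE_READ_BITFIELD(st, is_listening);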
......@@ -5115,6 +5115,7 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l
* least one of enums should be anonymous;
* - for ENUMs, check sizes, names are ignored;
* - for INT, size and signedness are ignored;
* - any two FLOATs are always compatible;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - everything else shouldn't be ever a target of relocation.
......@@ -5141,6 +5142,7 @@ static int bpf_core_fields_are_compat(const struct btf *local_btf,
switch (btf_kind(local_type)) {
case BTF_KIND_PTR:
case BTF_KIND_FLOAT:
return 1;
case BTF_KIND_FWD:
case BTF_KIND_ENUM: {
......@@ -6245,8 +6247,8 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
/* bpf_core_patch_insn() should know how to handle missing targ_spec */
err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
prog->name, relo_idx, relo->insn_off, err);
pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n",
prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err);
return -EINVAL;
}
......
......@@ -9,10 +9,13 @@
#include <test_maps.h>
static int nr_cpus;
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
int *values)
__s64 *values, bool is_pcpu)
{
int i, err;
int i, j, err;
int cpu_offset = 0;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
......@@ -20,22 +23,41 @@ static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
for (i = 0; i < max_entries; i++) {
keys[i] = i;
if (is_pcpu) {
cpu_offset = i * nr_cpus;
for (j = 0; j < nr_cpus; j++)
(values + cpu_offset)[j] = i + 1 + j;
} else {
values[i] = i + 1;
}
}
err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}
static void map_batch_verify(int *visited, __u32 max_entries,
int *keys, int *values)
static void map_batch_verify(int *visited, __u32 max_entries, int *keys,
__s64 *values, bool is_pcpu)
{
int i;
int i, j;
int cpu_offset = 0;
memset(visited, 0, max_entries * sizeof(*visited));
for (i = 0; i < max_entries; i++) {
if (is_pcpu) {
cpu_offset = i * nr_cpus;
for (j = 0; j < nr_cpus; j++) {
__s64 value = (values + cpu_offset)[j];
CHECK(keys[i] + j + 1 != value,
"key/value checking",
"error: i %d j %d key %d value %lld\n", i,
j, keys[i], value);
}
} else {
CHECK(keys[i] + 1 != values[i], "key/value checking",
"error: i %d key %d value %d\n", i, keys[i], values[i]);
"error: i %d key %d value %lld\n", i, keys[i],
values[i]);
}
visited[i] = 1;
}
for (i = 0; i < max_entries; i++) {
......@@ -44,19 +66,21 @@ static void map_batch_verify(int *visited, __u32 max_entries,
}
}
void test_array_map_batch_ops(void)
static void __test_map_lookup_and_update_batch(bool is_pcpu)
{
struct bpf_create_map_attr xattr = {
.name = "array_map",
.map_type = BPF_MAP_TYPE_ARRAY,
.map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY :
BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.value_size = sizeof(__s64),
};
int map_fd, *keys, *values, *visited;
int map_fd, *keys, *visited;
__u32 count, total, total_success;
const __u32 max_entries = 10;
__u64 batch = 0;
int err, step;
int err, step, value_size;
void *values;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
......@@ -67,22 +91,23 @@ void test_array_map_batch_ops(void)
CHECK(map_fd == -1,
"bpf_create_map_xattr()", "error:%s\n", strerror(errno));
keys = malloc(max_entries * sizeof(int));
values = malloc(max_entries * sizeof(int));
visited = malloc(max_entries * sizeof(int));
value_size = sizeof(__s64);
if (is_pcpu)
value_size *= nr_cpus;
keys = calloc(max_entries, sizeof(*keys));
values = calloc(max_entries, value_size);
visited = calloc(max_entries, sizeof(*visited));
CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
strerror(errno));
/* populate elements to the map */
map_batch_update(map_fd, max_entries, keys, values);
/* test 1: lookup in a loop with various steps. */
total_success = 0;
for (step = 1; step < max_entries; step++) {
map_batch_update(map_fd, max_entries, keys, values);
map_batch_verify(visited, max_entries, keys, values);
map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
memset(keys, 0, max_entries * sizeof(*keys));
memset(values, 0, max_entries * sizeof(*values));
memset(values, 0, max_entries * value_size);
batch = 0;
total = 0;
/* iteratively lookup/delete elements with 'step'
......@@ -91,9 +116,9 @@ void test_array_map_batch_ops(void)
count = step;
while (true) {
err = bpf_map_lookup_batch(map_fd,
total ? &batch : NULL, &batch,
keys + total,
values + total,
total ? &batch : NULL,
&batch, keys + total,
values + total * value_size,
&count, &opts);
CHECK((err && errno != ENOENT), "lookup with steps",
......@@ -108,7 +133,7 @@ void test_array_map_batch_ops(void)
CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);
map_batch_verify(visited, max_entries, keys, values);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
total_success++;
}
......@@ -116,9 +141,30 @@ void test_array_map_batch_ops(void)
CHECK(total_success == 0, "check total_success",
"unexpected failure\n");
printf("%s:PASS\n", __func__);
free(keys);
free(values);
free(visited);
}
static void array_map_batch_ops(void)
{
__test_map_lookup_and_update_batch(false);
printf("test_%s:PASS\n", __func__);
}
static void array_percpu_map_batch_ops(void)
{
__test_map_lookup_and_update_batch(true);
printf("test_%s:PASS\n", __func__);
}
void test_array_map_batch_ops(void)
{
nr_cpus = libbpf_num_possible_cpus();
CHECK(nr_cpus < 0, "nr_cpus checking",
"error: get possible cpus failed");
array_map_batch_ops();
array_percpu_map_batch_ops();
}
......@@ -77,7 +77,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
if (!ASSERT_GE(fd, 0, "create_tmp")) {
err = fd;
goto done;
}
......
......@@ -6,8 +6,6 @@
#include <test_progs.h>
#include <bpf/btf.h>
static int duration = 0;
void test_btf_endian() {
#if __BYTE_ORDER == __LITTLE_ENDIAN
enum btf_endianness endian = BTF_LITTLE_ENDIAN;
......@@ -71,7 +69,7 @@ void test_btf_endian() {
/* now modify original BTF */
var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1);
CHECK(var_id <= 0, "var_id", "failed %d\n", var_id);
ASSERT_GT(var_id, 0, "var_id");
btf__free(swap_btf);
swap_btf = NULL;
......
......@@ -54,7 +54,7 @@ void test_cgroup_link(void)
for (i = 0; i < cg_nr; i++) {
cgs[i].fd = create_and_get_cgroup(cgs[i].path);
if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
goto cleanup;
}
......
......@@ -210,11 +210,6 @@ static int duration = 0;
.bpf_obj_file = "test_core_reloc_existence.o", \
.btf_src_file = "btf__core_reloc_" #name ".o" \
#define FIELD_EXISTS_ERR_CASE(name) { \
FIELD_EXISTS_CASE_COMMON(name), \
.fails = true, \
}
#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
.case_name = test_name_prefix#name, \
.bpf_obj_file = objfile, \
......@@ -222,7 +217,7 @@ static int duration = 0;
#define BITFIELDS_CASE(name, ...) { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
"direct:", name), \
"probed:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
......@@ -230,7 +225,7 @@ static int duration = 0;
.output_len = sizeof(struct core_reloc_bitfields_output), \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
"probed:", name), \
"direct:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
......@@ -551,8 +546,7 @@ static struct core_reloc_test_case test_cases[] = {
ARRAYS_ERR_CASE(arrays___err_too_small),
ARRAYS_ERR_CASE(arrays___err_too_shallow),
ARRAYS_ERR_CASE(arrays___err_non_array),
ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
/* enum/ptr/int handling scenarios */
......@@ -643,13 +637,25 @@ static struct core_reloc_test_case test_cases[] = {
},
.output_len = sizeof(struct core_reloc_existence_output),
},
FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
FIELD_EXISTS_ERR_CASE(existence__err_int_type),
FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
{
FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
},
.input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
.a_exists = 0,
.b_exists = 0,
.c_exists = 0,
.arr_exists = 0,
.s_exists = 0,
.a_value = 0xff000001u,
.b_value = 0xff000002u,
.c_value = 0xff000003u,
.arr_value = 0xff000004u,
.s_value = 0xff000005u,
},
.output_len = sizeof(struct core_reloc_existence_output),
},
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
......@@ -858,13 +864,20 @@ void test_core_reloc(void)
"prog '%s' not found\n", probe_name))
goto cleanup;
if (test_case->btf_src_file) {
err = access(test_case->btf_src_file, R_OK);
if (!ASSERT_OK(err, "btf_src_file"))
goto cleanup;
}
load_attr.obj = obj;
load_attr.log_level = 0;
load_attr.target_btf_path = test_case->btf_src_file;
err = bpf_object__load_xattr(&load_attr);
if (err) {
if (!test_case->fails)
CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
ASSERT_OK(err, "obj_load");
goto cleanup;
}
......@@ -903,10 +916,8 @@ void test_core_reloc(void)
goto cleanup;
}
if (test_case->fails) {
CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
goto cleanup;
}
equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
......
......@@ -3,35 +3,57 @@
#include <test_progs.h>
#include "fentry_test.skel.h"
void test_fentry_test(void)
static int fentry_test(struct fentry_test *fentry_skel)
{
struct fentry_test *fentry_skel = NULL;
int err, prog_fd, i;
__u32 duration = 0, retval;
struct bpf_link *link;
__u64 *result;
fentry_skel = fentry_test__open_and_load();
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto cleanup;
err = fentry_test__attach(fentry_skel);
if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto cleanup;
if (!ASSERT_OK(err, "fentry_attach"))
return err;
/* Check that already linked program can't be attached again. */
link = bpf_program__attach(fentry_skel->progs.test1);
if (!ASSERT_ERR_PTR(link, "fentry_attach_link"))
return -1;
prog_fd = bpf_program__fd(fentry_skel->progs.test1);
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "test_run",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
ASSERT_OK(err, "test_run");
ASSERT_EQ(retval, 0, "test_run");
result = (__u64 *)fentry_skel->bss;
for (i = 0; i < 6; i++) {
if (CHECK(result[i] != 1, "result",
"fentry_test%d failed err %lld\n", i + 1, result[i]))
goto cleanup;
for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) {
if (!ASSERT_EQ(result[i], 1, "fentry_result"))
return -1;
}
fentry_test__detach(fentry_skel);
/* zero results for re-attach test */
memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss));
return 0;
}
void test_fentry_test(void)
{
struct fentry_test *fentry_skel = NULL;
int err;
fentry_skel = fentry_test__open_and_load();
if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
goto cleanup;
err = fentry_test(fentry_skel);
if (!ASSERT_OK(err, "fentry_first_attach"))
goto cleanup;
err = fentry_test(fentry_skel);
ASSERT_OK(err, "fentry_second_attach");
cleanup:
fentry_test__destroy(fentry_skel);
}
......@@ -3,35 +3,57 @@
#include <test_progs.h>
#include "fexit_test.skel.h"
void test_fexit_test(void)
static int fexit_test(struct fexit_test *fexit_skel)
{
struct fexit_test *fexit_skel = NULL;
int err, prog_fd, i;
__u32 duration = 0, retval;
struct bpf_link *link;
__u64 *result;
fexit_skel = fexit_test__open_and_load();
if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
goto cleanup;
err = fexit_test__attach(fexit_skel);
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto cleanup;
if (!ASSERT_OK(err, "fexit_attach"))
return err;
/* Check that already linked program can't be attached again. */
link = bpf_program__attach(fexit_skel->progs.test1);
if (!ASSERT_ERR_PTR(link, "fexit_attach_link"))
return -1;
prog_fd = bpf_program__fd(fexit_skel->progs.test1);
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "test_run",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
ASSERT_OK(err, "test_run");
ASSERT_EQ(retval, 0, "test_run");
result = (__u64 *)fexit_skel->bss;
for (i = 0; i < 6; i++) {
if (CHECK(result[i] != 1, "result",
"fexit_test%d failed err %lld\n", i + 1, result[i]))
goto cleanup;
for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) {
if (!ASSERT_EQ(result[i], 1, "fexit_result"))
return -1;
}
fexit_test__detach(fexit_skel);
/* zero results for re-attach test */
memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss));
return 0;
}
void test_fexit_test(void)
{
struct fexit_test *fexit_skel = NULL;
int err;
fexit_skel = fexit_test__open_and_load();
if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
goto cleanup;
err = fexit_test(fexit_skel);
if (!ASSERT_OK(err, "fexit_first_attach"))
goto cleanup;
err = fexit_test(fexit_skel);
ASSERT_OK(err, "fexit_second_attach");
cleanup:
fexit_test__destroy(fexit_skel);
}
......@@ -134,7 +134,7 @@ void test_kfree_skb(void)
/* make sure kfree_skb program was triggered
* and it sent expected skb into ring buffer
*/
CHECK_FAIL(!passed);
ASSERT_TRUE(passed, "passed");
err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok);
if (CHECK(err, "get_result",
......
......@@ -45,12 +45,18 @@ static int trigger_module_test_write(int write_sz)
return 0;
}
static int delete_module(const char *name, int flags)
{
return syscall(__NR_delete_module, name, flags);
}
void test_module_attach(void)
{
const int READ_SZ = 456;
const int WRITE_SZ = 457;
struct test_module_attach* skel;
struct test_module_attach__bss *bss;
struct bpf_link *link;
int err;
skel = test_module_attach__open();
......@@ -84,6 +90,23 @@ void test_module_attach(void)
ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet");
ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret");
test_module_attach__detach(skel);
/* attach fentry/fexit and make sure it get's module reference */
link = bpf_program__attach(skel->progs.handle_fentry);
if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
bpf_link__destroy(link);
link = bpf_program__attach(skel->progs.handle_fexit);
if (!ASSERT_OK_PTR(link, "attach_fexit"))
goto cleanup;
ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
bpf_link__destroy(link);
cleanup:
test_module_attach__destroy(skel);
}
......@@ -160,11 +160,8 @@ int test_resolve_btfids(void)
break;
if (i > 0) {
ret = CHECK(test_set.ids[i - 1] > test_set.ids[i],
"sort_check",
"test_set is not sorted\n");
if (ret)
break;
if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check"))
return -1;
}
}
......
......@@ -42,9 +42,7 @@ void test_snprintf_btf(void)
* and it set expected return values from bpf_trace_printk()s
* and all tests ran.
*/
if (CHECK(bss->ret <= 0,
"bpf_snprintf_btf: got return value",
"ret <= 0 %ld test %d\n", bss->ret, bss->ran_subtests))
if (!ASSERT_GT(bss->ret, 0, "bpf_snprintf_ret"))
goto cleanup;
if (CHECK(bss->ran_subtests == 0, "check if subtests ran",
......
......@@ -51,43 +51,64 @@ int exec_cmd(int *monitored_pid)
return -EINVAL;
}
void test_test_lsm(void)
static int test_lsm(struct lsm *skel)
{
struct lsm *skel = NULL;
int err, duration = 0;
struct bpf_link *link;
int buf = 1234;
skel = lsm__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
goto close_prog;
int err;
err = lsm__attach(skel);
if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
goto close_prog;
if (!ASSERT_OK(err, "attach"))
return err;
/* Check that already linked program can't be attached again. */
link = bpf_program__attach(skel->progs.test_int_hook);
if (!ASSERT_ERR_PTR(link, "attach_link"))
return -1;
err = exec_cmd(&skel->bss->monitored_pid);
if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno))
goto close_prog;
if (!ASSERT_OK(err, "exec_cmd"))
return err;
CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n",
skel->bss->bprm_count);
ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count");
skel->bss->monitored_pid = getpid();
err = stack_mprotect();
if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n",
errno))
goto close_prog;
if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
return err;
CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
"mprotect_count = %d\n", skel->bss->mprotect_count);
ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count");
syscall(__NR_setdomainname, &buf, -2L);
syscall(__NR_setdomainname, 0, -3L);
syscall(__NR_setdomainname, ~0L, -4L);
CHECK(skel->bss->copy_test != 3, "copy_test",
"copy_test = %d\n", skel->bss->copy_test);
ASSERT_EQ(skel->bss->copy_test, 3, "copy_test");
lsm__detach(skel);
skel->bss->copy_test = 0;
skel->bss->bprm_count = 0;
skel->bss->mprotect_count = 0;
return 0;
}
void test_test_lsm(void)
{
struct lsm *skel = NULL;
int err;
skel = lsm__open_and_load();
if (!ASSERT_OK_PTR(skel, "lsm_skel_load"))
goto close_prog;
err = test_lsm(skel);
if (!ASSERT_OK(err, "test_lsm_first_attach"))
goto close_prog;
err = test_lsm(skel);
ASSERT_OK(err, "test_lsm_second_attach");
close_prog:
lsm__destroy(skel);
......
#include "core_reloc_types.h"
void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_existence___err_wrong_int_kind x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_existence___err_wrong_int_type x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_existence___err_wrong_struct_type x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_existence___err_wrong_int_sz x) {}
void f(struct core_reloc_existence___wrong_field_defs x) {}
......@@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
int a;
};
struct core_reloc_existence___err_wrong_int_sz {
short a;
};
struct core_reloc_existence___err_wrong_int_type {
struct core_reloc_existence___wrong_field_defs {
void *a;
int b[1];
};
struct core_reloc_existence___err_wrong_int_kind {
struct{ int x; } c;
};
struct core_reloc_existence___err_wrong_arr_kind {
int arr;
};
struct core_reloc_existence___err_wrong_arr_value_type {
short arr[1];
};
struct core_reloc_existence___err_wrong_struct_type {
int s;
};
......
......@@ -130,6 +130,20 @@ extern int test__join_cgroup(const char *path);
#define CHECK_ATTR(condition, tag, format...) \
_CHECK(condition, tag, tattr.duration, format)
#define ASSERT_TRUE(actual, name) ({ \
static int duration = 0; \
bool ___ok = (actual); \
CHECK(!___ok, (name), "unexpected %s: got FALSE\n", (name)); \
___ok; \
})
#define ASSERT_FALSE(actual, name) ({ \
static int duration = 0; \
bool ___ok = !(actual); \
CHECK(!___ok, (name), "unexpected %s: got TRUE\n", (name)); \
___ok; \
})
#define ASSERT_EQ(actual, expected, name) ({ \
static int duration = 0; \
typeof(actual) ___act = (actual); \
......@@ -163,6 +177,39 @@ extern int test__join_cgroup(const char *path);
___ok; \
})
#define ASSERT_LE(actual, expected, name) ({ \
static int duration = 0; \
typeof(actual) ___act = (actual); \
typeof(expected) ___exp = (expected); \
bool ___ok = ___act <= ___exp; \
CHECK(!___ok, (name), \
"unexpected %s: actual %lld > expected %lld\n", \
(name), (long long)(___act), (long long)(___exp)); \
___ok; \
})
#define ASSERT_GT(actual, expected, name) ({ \
static int duration = 0; \
typeof(actual) ___act = (actual); \
typeof(expected) ___exp = (expected); \
bool ___ok = ___act > ___exp; \
CHECK(!___ok, (name), \
"unexpected %s: actual %lld <= expected %lld\n", \
(name), (long long)(___act), (long long)(___exp)); \
___ok; \
})
#define ASSERT_GE(actual, expected, name) ({ \
static int duration = 0; \
typeof(actual) ___act = (actual); \
typeof(expected) ___exp = (expected); \
bool ___ok = ___act >= ___exp; \
CHECK(!___ok, (name), \
"unexpected %s: actual %lld < expected %lld\n", \
(name), (long long)(___act), (long long)(___exp)); \
___ok; \
})
#define ASSERT_STREQ(actual, expected, name) ({ \
static int duration = 0; \
const char *___act = actual; \
......@@ -178,7 +225,8 @@ extern int test__join_cgroup(const char *path);
static int duration = 0; \
long long ___res = (res); \
bool ___ok = ___res == 0; \
CHECK(!___ok, (name), "unexpected error: %lld\n", ___res); \
CHECK(!___ok, (name), "unexpected error: %lld (errno %d)\n", \
___res, errno); \
___ok; \
})
......
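The new variants evaluate to the check's boolean result, like the existing ASSERT_EQ/ASSERT_OK, so tests can gate early exits on them. A small illustrative use (the file descriptor and names are made up):

	int fd = open("/dev/null", O_RDONLY);

	if (!ASSERT_GE(fd, 0, "open_devnull"))
		return;
	ASSERT_EQ(close(fd), 0, "close_devnull");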
......@@ -186,7 +186,7 @@
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.errstr = "R0 unbounded memory access",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
......